[{"data":1,"prerenderedAt":9103},["ShallowReactive",2],{"/en-us/blog/tags/devops/":3,"navigation-en-us":20,"banner-en-us":438,"footer-en-us":450,"DevOps-tag-page-en-us":661},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/devops","tags",false,"",{"tag":9,"tagSlug":10},"DevOps","devops",{"template":12},"BlogTag","content:en-us:blog:tags:devops.yml","yaml","Devops","content","en-us/blog/tags/devops.yml","en-us/blog/tags/devops","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":434,"_type":14,"title":435,"_source":16,"_file":436,"_stem":437,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":375,"minimal":406,"duo":425},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,185,190,296,356],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo 
ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":167},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,146],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and 
compliance","ShieldCheckLight",[133,136,141],{"text":134,"config":135},"Security & Compliance",{"href":129,"dataGaLocation":28,"dataGaName":134},{"text":137,"config":138},"Software Supply Chain Security",{"href":139,"dataGaLocation":28,"dataGaName":140},"/solutions/supply-chain/","Software supply chain security",{"text":142,"config":143},"Compliance & Governance",{"href":144,"dataGaLocation":28,"dataGaName":145},"/solutions/continuous-software-compliance/","Compliance and governance",{"title":147,"link":148,"items":153},"Measurement",{"config":149},{"icon":150,"href":151,"dataGaName":152,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[154,158,162],{"text":155,"config":156},"Visibility & Measurement",{"href":151,"dataGaLocation":28,"dataGaName":157},"Visibility and Measurement",{"text":159,"config":160},"Value Stream Management",{"href":161,"dataGaLocation":28,"dataGaName":159},"/solutions/value-stream-management/",{"text":163,"config":164},"Analytics & Insights",{"href":165,"dataGaLocation":28,"dataGaName":166},"/solutions/analytics-and-insights/","Analytics and insights",{"title":168,"items":169},"GitLab for",[170,175,180],{"text":171,"config":172},"Enterprise",{"href":173,"dataGaLocation":28,"dataGaName":174},"/enterprise/","enterprise",{"text":176,"config":177},"Small Business",{"href":178,"dataGaLocation":28,"dataGaName":179},"/small-business/","small business",{"text":181,"config":182},"Public Sector",{"href":183,"dataGaLocation":28,"dataGaName":184},"/solutions/public-sector/","public sector",{"text":186,"config":187},"Pricing",{"href":188,"dataGaName":189,"dataGaLocation":28,"dataNavLevelOne":189},"/pricing/","pricing",{"text":191,"config":192,"link":194,"lists":198,"feature":283},"Resources",{"dataNavLevelOne":193},"resources",{"text":195,"config":196},"View all resources",{"href":197,"dataGaName":193,"dataGaLocation":28},"/resources/",[199,232,255],{"title":200,"items":201},"Getting 
started",[202,207,212,217,222,227],{"text":203,"config":204},"Install",{"href":205,"dataGaName":206,"dataGaLocation":28},"/install/","install",{"text":208,"config":209},"Quick start guides",{"href":210,"dataGaName":211,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":213,"config":214},"Learn",{"href":215,"dataGaLocation":28,"dataGaName":216},"https://university.gitlab.com/","learn",{"text":218,"config":219},"Product documentation",{"href":220,"dataGaName":221,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":223,"config":224},"Best practice videos",{"href":225,"dataGaName":226,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":228,"config":229},"Integrations",{"href":230,"dataGaName":231,"dataGaLocation":28},"/integrations/","integrations",{"title":233,"items":234},"Discover",[235,240,245,250],{"text":236,"config":237},"Customer success stories",{"href":238,"dataGaName":239,"dataGaLocation":28},"/customers/","customer success stories",{"text":241,"config":242},"Blog",{"href":243,"dataGaName":244,"dataGaLocation":28},"/blog/","blog",{"text":246,"config":247},"Remote",{"href":248,"dataGaName":249,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":251,"config":252},"TeamOps",{"href":253,"dataGaName":254,"dataGaLocation":28},"/teamops/","teamops",{"title":256,"items":257},"Connect",[258,263,268,273,278],{"text":259,"config":260},"GitLab 
Services",{"href":261,"dataGaName":262,"dataGaLocation":28},"/services/","services",{"text":264,"config":265},"Community",{"href":266,"dataGaName":267,"dataGaLocation":28},"/community/","community",{"text":269,"config":270},"Forum",{"href":271,"dataGaName":272,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":274,"config":275},"Events",{"href":276,"dataGaName":277,"dataGaLocation":28},"/events/","events",{"text":279,"config":280},"Partners",{"href":281,"dataGaName":282,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":284,"textColor":285,"text":286,"image":287,"link":291},"#2f2a6b","#fff","Insights for the future of software development",{"altText":288,"config":289},"the source promo card",{"src":290},"/images/navigation/the-source-promo-card.svg",{"text":292,"config":293},"Read the latest",{"href":294,"dataGaName":295,"dataGaLocation":28},"/the-source/","the source",{"text":297,"config":298,"lists":300},"Company",{"dataNavLevelOne":299},"company",[301],{"items":302},[303,308,314,316,321,326,331,336,341,346,351],{"text":304,"config":305},"About",{"href":306,"dataGaName":307,"dataGaLocation":28},"/company/","about",{"text":309,"config":310,"footerGa":313},"Jobs",{"href":311,"dataGaName":312,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":312},{"text":274,"config":315},{"href":276,"dataGaName":277,"dataGaLocation":28},{"text":317,"config":318},"Leadership",{"href":319,"dataGaName":320,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":322,"config":323},"Team",{"href":324,"dataGaName":325,"dataGaLocation":28},"/company/team/","team",{"text":327,"config":328},"Handbook",{"href":329,"dataGaName":330,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":332,"config":333},"Investor relations",{"href":334,"dataGaName":335,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":337,"config":338},"Trust Center",{"href":339,"dataGaName":340,"dataGaLocation":28},"/security/","trust 
center",{"text":342,"config":343},"AI Transparency Center",{"href":344,"dataGaName":345,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":347,"config":348},"Newsletter",{"href":349,"dataGaName":350,"dataGaLocation":28},"/company/contact/","newsletter",{"text":352,"config":353},"Press",{"href":354,"dataGaName":355,"dataGaLocation":28},"/press/","press",{"text":357,"config":358,"lists":359},"Contact us",{"dataNavLevelOne":299},[360],{"items":361},[362,365,370],{"text":35,"config":363},{"href":37,"dataGaName":364,"dataGaLocation":28},"talk to sales",{"text":366,"config":367},"Get help",{"href":368,"dataGaName":369,"dataGaLocation":28},"/support/","get help",{"text":371,"config":372},"Customer portal",{"href":373,"dataGaName":374,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":376,"login":377,"suggestions":384},"Close",{"text":378,"link":379},"To search repositories and projects, login to",{"text":380,"config":381},"gitlab.com",{"href":42,"dataGaName":382,"dataGaLocation":383},"search login","search",{"text":385,"default":386},"Suggestions",[387,389,393,395,399,403],{"text":57,"config":388},{"href":62,"dataGaName":57,"dataGaLocation":383},{"text":390,"config":391},"Code Suggestions (AI)",{"href":392,"dataGaName":390,"dataGaLocation":383},"/solutions/code-suggestions/",{"text":109,"config":394},{"href":111,"dataGaName":109,"dataGaLocation":383},{"text":396,"config":397},"GitLab on AWS",{"href":398,"dataGaName":396,"dataGaLocation":383},"/partners/technology-partners/aws/",{"text":400,"config":401},"GitLab on Google Cloud",{"href":402,"dataGaName":400,"dataGaLocation":383},"/partners/technology-partners/google-cloud-platform/",{"text":404,"config":405},"Why GitLab?",{"href":70,"dataGaName":404,"dataGaLocation":383},{"freeTrial":407,"mobileIcon":412,"desktopIcon":417,"secondaryButton":420},{"text":408,"config":409},"Start free 
trial",{"href":410,"dataGaName":33,"dataGaLocation":411},"https://gitlab.com/-/trials/new/","nav",{"altText":413,"config":414},"Gitlab Icon",{"src":415,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-tanuki.svg","gitlab icon",{"altText":413,"config":418},{"src":419,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-type.svg",{"text":421,"config":422},"Get Started",{"href":423,"dataGaName":424,"dataGaLocation":411},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":426,"mobileIcon":430,"desktopIcon":432},{"text":427,"config":428},"Learn more about GitLab Duo",{"href":62,"dataGaName":429,"dataGaLocation":411},"gitlab duo",{"altText":413,"config":431},{"src":415,"dataGaName":416,"dataGaLocation":411},{"altText":413,"config":433},{"src":419,"dataGaName":416,"dataGaLocation":411},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":439,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":440,"button":441,"config":445,"_id":447,"_type":14,"_source":16,"_file":448,"_stem":449,"_extension":19},"/shared/en-us/banner","GitLab Duo Agent Platform is now in public beta!",{"text":68,"config":442},{"href":443,"dataGaName":444,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"layout":446},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":451,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":452,"_id":657,"_type":14,"title":658,"_source":16,"_file":659,"_stem":660,"_extension":19},"/shared/en-us/main-footer",{"text":453,"source":454,"edit":460,"contribute":465,"config":470,"items":475,"minimal":649},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":455,"config":456},"View page 
source",{"href":457,"dataGaName":458,"dataGaLocation":459},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":461,"config":462},"Edit this page",{"href":463,"dataGaName":464,"dataGaLocation":459},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":466,"config":467},"Please contribute",{"href":468,"dataGaName":469,"dataGaLocation":459},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":471,"facebook":472,"youtube":473,"linkedin":474},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[476,499,556,585,619],{"title":46,"links":477,"subMenu":482},[478],{"text":479,"config":480},"DevSecOps platform",{"href":55,"dataGaName":481,"dataGaLocation":459},"devsecops platform",[483],{"title":186,"links":484},[485,489,494],{"text":486,"config":487},"View plans",{"href":188,"dataGaName":488,"dataGaLocation":459},"view plans",{"text":490,"config":491},"Why Premium?",{"href":492,"dataGaName":493,"dataGaLocation":459},"/pricing/premium/","why premium",{"text":495,"config":496},"Why Ultimate?",{"href":497,"dataGaName":498,"dataGaLocation":459},"/pricing/ultimate/","why ultimate",{"title":500,"links":501},"Solutions",[502,507,510,512,517,522,526,529,533,538,540,543,546,551],{"text":503,"config":504},"Digital transformation",{"href":505,"dataGaName":506,"dataGaLocation":459},"/topics/digital-transformation/","digital transformation",{"text":134,"config":508},{"href":129,"dataGaName":509,"dataGaLocation":459},"security & compliance",{"text":123,"config":511},{"href":105,"dataGaName":106,"dataGaLocation":459},{"text":513,"config":514},"Agile development",{"href":515,"dataGaName":516,"dataGaLocation":459},"/solutions/agile-delivery/","agile 
delivery",{"text":518,"config":519},"Cloud transformation",{"href":520,"dataGaName":521,"dataGaLocation":459},"/topics/cloud-native/","cloud transformation",{"text":523,"config":524},"SCM",{"href":119,"dataGaName":525,"dataGaLocation":459},"source code management",{"text":109,"config":527},{"href":111,"dataGaName":528,"dataGaLocation":459},"continuous integration & delivery",{"text":530,"config":531},"Value stream management",{"href":161,"dataGaName":532,"dataGaLocation":459},"value stream management",{"text":534,"config":535},"GitOps",{"href":536,"dataGaName":537,"dataGaLocation":459},"/solutions/gitops/","gitops",{"text":171,"config":539},{"href":173,"dataGaName":174,"dataGaLocation":459},{"text":541,"config":542},"Small business",{"href":178,"dataGaName":179,"dataGaLocation":459},{"text":544,"config":545},"Public sector",{"href":183,"dataGaName":184,"dataGaLocation":459},{"text":547,"config":548},"Education",{"href":549,"dataGaName":550,"dataGaLocation":459},"/solutions/education/","education",{"text":552,"config":553},"Financial services",{"href":554,"dataGaName":555,"dataGaLocation":459},"/solutions/finance/","financial 
services",{"title":191,"links":557},[558,560,562,564,567,569,571,573,575,577,579,581,583],{"text":203,"config":559},{"href":205,"dataGaName":206,"dataGaLocation":459},{"text":208,"config":561},{"href":210,"dataGaName":211,"dataGaLocation":459},{"text":213,"config":563},{"href":215,"dataGaName":216,"dataGaLocation":459},{"text":218,"config":565},{"href":220,"dataGaName":566,"dataGaLocation":459},"docs",{"text":241,"config":568},{"href":243,"dataGaName":244,"dataGaLocation":459},{"text":236,"config":570},{"href":238,"dataGaName":239,"dataGaLocation":459},{"text":246,"config":572},{"href":248,"dataGaName":249,"dataGaLocation":459},{"text":259,"config":574},{"href":261,"dataGaName":262,"dataGaLocation":459},{"text":251,"config":576},{"href":253,"dataGaName":254,"dataGaLocation":459},{"text":264,"config":578},{"href":266,"dataGaName":267,"dataGaLocation":459},{"text":269,"config":580},{"href":271,"dataGaName":272,"dataGaLocation":459},{"text":274,"config":582},{"href":276,"dataGaName":277,"dataGaLocation":459},{"text":279,"config":584},{"href":281,"dataGaName":282,"dataGaLocation":459},{"title":297,"links":586},[587,589,591,593,595,597,599,603,608,610,612,614],{"text":304,"config":588},{"href":306,"dataGaName":299,"dataGaLocation":459},{"text":309,"config":590},{"href":311,"dataGaName":312,"dataGaLocation":459},{"text":317,"config":592},{"href":319,"dataGaName":320,"dataGaLocation":459},{"text":322,"config":594},{"href":324,"dataGaName":325,"dataGaLocation":459},{"text":327,"config":596},{"href":329,"dataGaName":330,"dataGaLocation":459},{"text":332,"config":598},{"href":334,"dataGaName":335,"dataGaLocation":459},{"text":600,"config":601},"Sustainability",{"href":602,"dataGaName":600,"dataGaLocation":459},"/sustainability/",{"text":604,"config":605},"Diversity, inclusion and belonging (DIB)",{"href":606,"dataGaName":607,"dataGaLocation":459},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":337,"config":609},{"href":339,"dataGaName":340,"dataGaLocation":459},{"text":347,"config":611},{"href":349,"dataGaName":350,"dataGaLocation":459},{"text":352,"config":613},{"href":354,"dataGaName":355,"dataGaLocation":459},{"text":615,"config":616},"Modern Slavery Transparency Statement",{"href":617,"dataGaName":618,"dataGaLocation":459},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":620,"links":621},"Contact Us",[622,625,627,629,634,639,644],{"text":623,"config":624},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":459},{"text":366,"config":626},{"href":368,"dataGaName":369,"dataGaLocation":459},{"text":371,"config":628},{"href":373,"dataGaName":374,"dataGaLocation":459},{"text":630,"config":631},"Status",{"href":632,"dataGaName":633,"dataGaLocation":459},"https://status.gitlab.com/","status",{"text":635,"config":636},"Terms of use",{"href":637,"dataGaName":638,"dataGaLocation":459},"/terms/","terms of use",{"text":640,"config":641},"Privacy statement",{"href":642,"dataGaName":643,"dataGaLocation":459},"/privacy/","privacy statement",{"text":645,"config":646},"Cookie preferences",{"dataGaName":647,"dataGaLocation":459,"id":648,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"items":650},[651,653,655],{"text":635,"config":652},{"href":637,"dataGaName":638,"dataGaLocation":459},{"text":640,"config":654},{"href":642,"dataGaName":643,"dataGaLocation":459},{"text":645,"config":656},{"dataGaName":647,"dataGaLocation":459,"id":648,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":662,"featuredPost":9080,"totalPagesCount":9101,"initialPosts":9102},[663,691,712,735,756,778,800,821,841,862,882,902,923,943,962,985,1006,1026,1048,1067,1086,1105,1124,1145,1165,1188,1208,1229,1250,1270,1288,1306,1326,1346,1367,1388,1407,1425,1444,1464,1484,1503,1522,1541,1559,1579,1599,1619,1638,1657,1676,1697,1717,1738,1757,1776,1796,1815,1837,1856,1875,1893,1911,1931,1950,1971,1991,2011,2032,2050,2069,2090,2110,2130,2151,2171,2190,2209,2229,2250,2269,2288,2306,2326,2346,2365,2384,2404,2423,2442,2462,2483,2502,2521,2542,2562,2581,2599,2618,2636,2655,2674,2693,2712,2731,2751,2770,2792,2811,2832,2852,2872,2891,2910,2930,2949,2968,2988,3008,3027,3045,3064,3082,3102,3121,3140,3159,3180,3200,3219,3239,3258,3277,3298,3316,3337,3357,3378,3398,3420,3438,3456,3475,3495,3514,3533,3553,3573,3592,3612,3630,3652,3671,3691,3713,3731,3750,3769,3788,3806,3824,3843,3861,3880,3899,3920,3939,3960,3980,4000,4019,4037,4057,4077,4097,4117,4136,4156,4175,4195,4214,4233,4252,4271,4289,4309,4329,4347,4367,4385,4404,4422,4441,4461,4479,4498,4516,4535,4554,4572,4590,4610,4630,4650,4668,4688,4706,4724,4744,4763,4783,4801,4820,4839,4858,4878,4896,4915,4933,4953,4973,4992,5010,5029,5048,5067,5087,5106,5125,5144,5163,5181,5202,5221,5240,5261,5283,5301,5320,5339,5357,5376,5395,5415,5434,5454,5473,5492,5510,5530,5550,5570,5589,5608,5628,5647,5665,5684,5704,5723,5740,5759,5778,5797,5816,5835,5855,5874,5892,5912,5931,5951,5970,5991,6010,6028,6047,6067,6085,6102,6122,6140,6160,6179,6198,6217,6237,6255,6274,6293,6312,6330,6349,6367,6386,6406,6424,6442,6461,6479,6498,6517,6536,6555,6573,6592,6611,6629,6647,6663,6681,6700,6720,6738,6757,6775,6794,6813,6832,6850,6870,6890,6910,6930,6949,6967,6987,7005,7024,7043,7063,7082,7102,7121,7139,7158,7176,7195,7213,7231,7250,7271,7290,7310,7329,7348,7365,7384,7402,7422,7441,7460,7478,7497,7516,7535,7554,7574,7592,7611,7630,7649,7666,7685,7704,7722,7740,7759,7777,7795,7815,7833,7852,7871
,7890,7908,7927,7945,7962,7979,7998,8016,8034,8053,8072,8090,8108,8126,8145,8163,8182,8199,8218,8237,8257,8275,8294,8314,8334,8352,8371,8390,8409,8428,8446,8467,8485,8504,8523,8541,8561,8579,8598,8618,8637,8658,8676,8694,8713,8732,8750,8768,8786,8804,8824,8844,8862,8880,8899,8916,8934,8952,8970,8988,9006,9025,9043,9061],{"_path":664,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":665,"content":673,"config":684,"_id":687,"_type":14,"title":688,"_source":16,"_file":689,"_stem":690,"_extension":19},"/en-us/blog/2018-global-developer-report",{"title":666,"description":667,"ogTitle":666,"ogDescription":667,"noIndex":6,"ogImage":668,"ogUrl":669,"ogSiteName":670,"ogType":671,"canonicalUrls":669,"schema":672},"Global Developer Report - 2018 for Open Source & DevOps","We surveyed over 5,000 software professionals to examine current attitudes and perception of the state of culture, workflow, and tooling within IT organizations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663993/Blog/Hero%20Images/2018-developer-report-cover.jpg","https://about.gitlab.com/blog/2018-global-developer-report","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Global Developer Report confirms 2018 is the year for open source and DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2018-03-07\",\n      }",{"title":674,"description":667,"authors":675,"heroImage":668,"date":677,"body":678,"category":679,"tags":680},"Global Developer Report confirms 2018 is the year for open source and DevOps",[676],"Erica Lindberg","2018-03-07","\n_Our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) is out now! 
Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nFrom the junior developer with just a handful of years’ experience to the software professional who’s been in the game for decades, we set out to see how the people behind the software are dealing with a rapidly changing technology landscape. This year’s survey reveals that unclear direction is a developer’s greatest challenge, IT managers are investing the most in continuous integration and delivery, and nearly all agree that the importance of open source cannot be overstated.\n\n\u003C!--more -->\n\nThe focus of [GitLab’s 2018 Global Developer survey](/developer-survey/previous/2018/) was to understand developers’ attitudes toward their workplace, uncover disparities between developers and their management, and benchmark the state of culture, workflow, and tooling within IT organizations. We asked a broad set of questions covering everything from developers’ opinions on their teams’ ability to collaborate and succeed at work to their preferences on workflow methodology and tooling.\n\n\u003Cdiv style=\"text-align: center\"> 🎙\u003Cstrong>\u003Ca href=\"https://webinars.devops.com/top-5-takeaways-from-the-2018-global-developer-survey\"> Join us March 29 for a live discussion with Alan Shimel of DevOps.com on the top 5 takeaways from the report\u003C/a> \u003C/strong> 🎙 ️\u003C/div>\n\n## Developer satisfaction\n\nWe found that the majority of developers are satisfied with the conditions of their workplace, and managers should focus on improving the planning and testing phases of the development lifecycle. 
We also found that IT management is more optimistic in their perception of overall workplace satisfaction with roughly 10 percent more respondents agreeing their team is set up to succeed, and that project requirements and deadlines are set up front.\n\n\u003Cimg src=\"/images/blogimages/2018-developer-report-stats_2x.jpg\" alt=\"2018 Developer Report\" style=\"width: 900px;\"/>\n\nDelays during the planning phase emerged as a top challenge for all respondents and unclear direction remains the greatest challenge to getting work done for developers.\n\n## DevOps\n\nCommitment to and demand for DevOps is growing, despite challenges posed by outmoded tooling and cultural resistance to change. Adoption is still in early stages, with 23 percent identifying DevOps as their development methodology, but this is sure to increase with IT management naming it as one of their top three areas for technology investment in 2018. The tide of developer opinion is following suit: we found that the majority of developers agree that a DevOps workflow saves valuable time during the development process. Teams currently practicing DevOps confirm the productivity gains – high performers, who told us they deploy their code on demand, and who estimated that they spend 50 percent or more of their time on new work, report having a clear DevOps culture at rates more than double that of lower-performing teams.\n\n## Open source\n\nOpen source projects like [Kubernetes](/blog/containers-kubernetes-basics/) and [CoreOS](/blog/coreos-acquisition/) have gained a lot of recent attention and this year’s survey underscores the value of creating software in the open. 92 percent of total respondents agree that open source tools are important to software innovation and nearly 50 percent report that most of their tools are open source.\n\n## About the 2018 survey\n\nGitLab surveyed 5,296 software professionals of varying backgrounds and industries around the world. 
The margin of error is two percent, assuming a population size of 21 million software professionals and 99 percent confidence level.\n\n## Methodology\n\nWe launched this Global Developer Survey on November 17, 2017, collecting responses\nuntil December 18, 2017. During that time, we promoted the survey primarily on GitLab’s\nsocial media channels and newsletter. In order to correct for the gender imbalance\ndeveloping in our survey sample, we made an extra push via Twitter on December 5 to encourage\nwomen involved in the software development lifecycle to take the survey. By the end of the open\nperiod, we achieved approximately 25 percent female respondents, the same percentage of women who currently\nhold computing roles, according to [NCWIT](https://www.ncwit.org/sites/default/files/resources/womenintech_facts_fullreport_05132016.pdf).\n\n| Frequently asked questions |\n| -------- | -------- |\n| **How can I access the report?**   | You can view the complete report [here](/developer-survey/).   |\n| **Are the raw results publicly available?**  | Yes, you can view the raw data [here](https://www.surveymonkey.com/results/SM-G3S6S63P8/).   |\n| **Did only GitLab users take the survey?** | No, it was open to all who work in software production. You can view the survey demographics [here](/developer-survey/).  |\n| **How can I ask questions or give feedback about the survey and results?** | You can direct questions or comments about the survey to [surveys@gitlab.com](mailto:surveys@gitlab.com). |\n| **I’d like to participate in the next survey. Can I sign up for alerts?** | The best way to receive news about the Global Developer Survey is to sign up for our bi-weekly newsletter – you can do that below or visit our [Subscription Center](https://page.gitlab.com/SubscriptionCenter.html). 
|\n","insights",[681,682,9,683],"developer survey","open source","workflow",{"slug":685,"featured":6,"template":686},"2018-global-developer-report","BlogPost","content:en-us:blog:2018-global-developer-report.yml","2018 Global Developer Report","en-us/blog/2018-global-developer-report.yml","en-us/blog/2018-global-developer-report",{"_path":692,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":693,"content":699,"config":706,"_id":708,"_type":14,"title":709,"_source":16,"_file":710,"_stem":711,"_extension":19},"/en-us/blog/2019-developer-survey-announcement",{"title":694,"description":695,"ogTitle":694,"ogDescription":695,"noIndex":6,"ogImage":696,"ogUrl":697,"ogSiteName":670,"ogType":671,"canonicalUrls":697,"schema":698},"The 2019 developer survey: Help shape the industry","What do you need in order to thrive? From fewer delays in the development process to early detection of security vulnerabilities, we want to identify what you need to move ideas into action.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679930/Blog/Hero%20Images/2019-developer-survey-cover.png","https://about.gitlab.com/blog/2019-developer-survey-announcement","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The 2019 Global Developer Survey is now open! Share your thoughts to shape the industry.\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-01-23\",\n      }",{"title":700,"description":695,"authors":701,"heroImage":696,"date":703,"body":704,"category":679,"tags":705},"The 2019 Global Developer Survey is now open! Share your thoughts to shape the industry.",[702],"Suri Patel","2019-01-23","\n\nAs software professionals, you are the creators, builders, researchers, and\nproblem solvers of technology, and your opinions should be the pulse of the\nindustry. 
We passionately believe that [everyone can contribute](/company/mission/#mission),\nso we created the Global Developer Survey as a way to help you influence the way\nyou, your team, and your managers code, test, and deploy. By voicing your\nthoughts in the Developer Survey, you can shape a solution-focused approach\nto industry-wide challenges. We hope that, together, we can drive industry\ndialogue around the needs of today’s software professionals, sparking a movement\nto remove roadblocks and focus on helping teams thrive.\n\nWe'll examine the findings from the Developer Survey and provide a summary and\nanalysis in the Developer Report, which will be published in May.\nThis comprehensive report dissects cross-functional relationships and offers insights\ninto successful practices, problem areas, and potential solutions. In our\n[previous reports](/developer-survey/), we explored what teams need in order to\ndo their best work. This year, we want to uncover what software professionals\nneed in order to rapidly innovate. Whether you need more accurate estimates on\nplanning features, a decrease in development process delays, or early detection\nof security vulnerabilities, we want to identify your needs. Learn more and [share on Twitter](https://twitter.com/gitlab/status/1088116356405518343).\n\n## How the survey works\n\nThe survey takes less than 20 minutes to complete and includes\napproximately 45 questions. The survey is anonymous, and the data and results\nwill be reviewed in aggregate. 
We’re covering a large range of topics this year,\nincluding delays in the development lifecycle, planning features, and security analysis.\n\nTo ensure that the Developer Survey asks the right questions to elicit strong\nfindings, the UX research team collaborated on the survey, and we tested the\nquestions with the GitLab engineering team to gather feedback and suggestions\nfor improvement.\n\nThe survey is open to anyone involved in software engineering – from developers\nand engineers and security professionals to DevOps managers and IT executives.\nIf you're involved in software engineering, we'd love to hear your thoughts!\n\n## Swag and iPad Pro giveaway\n\nWe’re so grateful that you’re partnering with us to learn about the industry,\nso we’re giving away five GitLab messenger bags and one iPad Pro! Each week, we’ll\nrandomly select one respondent to receive a messenger bag. To enter to win the\niPad Pro, please take the survey, share the survey on social, and send a link to your\npublic post to giveaways@gitlab.com. We’ll randomly select a winner when the\nsurvey closes. Good luck! We hope you win. 😃\n\n## Frequently asked questions\n\n**What is the Global Developer Survey?**\n\nThe Developer Survey is an anonymous questionnaire that gathers insights from software\nprofessionals to reflect the growing needs and viewpoints of the industry.\n\n**What is the Global Developer Report?**\n\nThe Global Developer Report is a summary and analysis of the findings gathered in\nthe Developer Survey. It dissects cross-functional relationships and offers\ninsights into successful practices, problem areas, and potential solutions.\n\n**What is GitLab’s role?**\n\nWhile the Developer Report is published by GitLab, it’s not about GitLab. As\nsoftware professionals, your words have the power to shape the industry, inform\nleadership, and set trends, and your thoughts drive the survey. GitLab only\nwants to help you amplify your voices. 
\n\n**When does the survey open/close?**\n\nThe survey [opened on Jan. 23](https://twitter.com/gitlab/status/1088116356405518343) at 8am PT and closes on Feb. 27 at 11:59pm PT.\n\n**How do I win a prize?**\n\nTo enter to win a messenger bag, please complete the survey and enter your email address.\nTo enter to win the iPad Pro, please take the survey and enter your email address, share\nthe survey on social, and send a link to your public post to giveaways@gitlab.com.\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\n[Take the survey](https://www.surveymonkey.com/r/KY2WBCK)\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\n*You must complete the survey and provide an email address to be eligible to\nwin a prize. Your privacy is important to us, so email addresses will only be used for the\ngiveaway draw and will not be saved.* [Please read the official sweepstake\nrules here.](https://about.gitlab.com/community/sweepstakes/2019-developer-survey.index.html)\n{: .note}\n",[681,9,267],{"slug":707,"featured":6,"template":686},"2019-developer-survey-announcement","content:en-us:blog:2019-developer-survey-announcement.yml","2019 Developer Survey Announcement","en-us/blog/2019-developer-survey-announcement.yml","en-us/blog/2019-developer-survey-announcement",{"_path":713,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":714,"content":720,"config":729,"_id":731,"_type":14,"title":732,"_source":16,"_file":733,"_stem":734,"_extension":19},"/en-us/blog/2019-gartner-aro-mq",{"title":715,"description":716,"ogTitle":715,"ogDescription":716,"noIndex":6,"ogImage":717,"ogUrl":718,"ogSiteName":670,"ogType":671,"canonicalUrls":718,"schema":719},"Gartner names GitLab challenger in release orchestration","We're happy to share that GitLab is a Challenger in Gartner's 2019 ARO Magic 
Quadrant","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680619/Blog/Hero%20Images/construction-blueprint.jpg","https://about.gitlab.com/blog/2019-gartner-aro-mq","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab named Challenger in Gartner Magic Quadrant for Application Release Orchestration 2019\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2020-01-16\",\n      }",{"title":721,"description":716,"authors":722,"heroImage":717,"date":724,"body":725,"category":726,"tags":727},"GitLab named Challenger in Gartner Magic Quadrant for Application Release Orchestration 2019",[723],"William Chia","2020-01-16","\n\nWe are pleased to share that recently GitLab was named a Challenger in the Gartner 2019 Magic Quadrant for Application Release Orchestration. ARO is a relatively new area for GitLab, but we believe our placement as a Challenger compared to last year’s placement as a Niche Player reflects the work we’ve put in and rapid progress we’ve made.\n\nYou can visit our [ARO MQ commentary page](/analysts/gartner-aro19/) to read our thoughts on the ARO markets and this report along with the lessons we learn participating. We’ll be adding links to this page to our roadmap items that show our plans for continued improvement. \n\nGartner, Magic Quadrant for Application Release Orchestration, 7 October 2019, Daniel Betts, Chris Saunderson, Hassan Ennaciri, Christopher Little Gartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s research organization and should not be construed as statements of fact. 
Gartner disclaims all warranties, express or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose. \n{: .note}\n\nImage by \u003Ca href=\"https://pixabay.com/users/pisauikan-4552082/?utm_source=link-attribution&amp;utm_medium=referral&amp;utm_campaign=image&amp;utm_content=2682641\">pisauikan\u003C/a> from \u003Ca href=\"https://pixabay.com/?utm_source=link-attribution&amp;utm_medium=referral&amp;utm_campaign=image&amp;utm_content=2682641\">Pixabay\u003C/a>\n{: .note}\n","news",[9,728,726,109],"inside GitLab",{"slug":730,"featured":6,"template":686},"2019-gartner-aro-mq","content:en-us:blog:2019-gartner-aro-mq.yml","2019 Gartner Aro Mq","en-us/blog/2019-gartner-aro-mq.yml","en-us/blog/2019-gartner-aro-mq",{"_path":736,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":737,"content":743,"config":750,"_id":752,"_type":14,"title":753,"_source":16,"_file":754,"_stem":755,"_extension":19},"/en-us/blog/4-must-know-devops-principles",{"title":738,"description":739,"ogTitle":738,"ogDescription":739,"noIndex":6,"ogImage":740,"ogUrl":741,"ogSiteName":670,"ogType":671,"canonicalUrls":741,"schema":742},"4 Must-know DevOps principles","Learn four key DevOps principles and why they are essential to successful development and deployment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665982/Blog/Hero%20Images/jpvalery-9pLx0sLli4unsplash.jpg","https://about.gitlab.com/blog/4-must-know-devops-principles","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 Must-know DevOps principles\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-02-11\",\n      }",{"title":738,"description":739,"authors":744,"heroImage":740,"date":746,"body":747,"category":679,"tags":748},[745],"GitLab","2022-02-11","The popular software development methodology [DevOps](/topics/devops/) can 
be a bit confusing to beginners, especially when it encompasses other areas such as security (DevSecOps), business (BizDevOps), and the like. \n\n## So what is DevOps?\n\nDevOps takes two previously separated teams – software development and IT operations – and turns them into one united front that creates secure code while speeding up the software development lifecycle. DevOps fundamentals include a collaborative and communicative culture, automated testing, releases and deployments, and frequent iteration. Another commonly used term in the DevOps space is [DevSecOps](https://about.gitlab.com/topics/devsecops/), which refers to a DevOps practice with a specific emphasis on security.\n\nWhat matters is what’s at the heart of the DevOps methodology – these four key principles that can improve your organization’s software development practice.\n\n1. Automation of the software development lifecycle\n2. Collaboration and communication\n3. Continuous improvement and minimization of waste\n4. Hyperfocus on user needs with short feedback loops\n\n## An examination of key DevOps principles\n\nRoughly 15 years ago, the idea emerged to bring development and operations together in a seamless fashion. In 2009, the term “DevOps” was coined by Patrick Debois, who is considered one of the methodology’s primary gurus. DevOps includes a lot of the principles of [Agile software development](/topics/agile-delivery/), but with a special emphasis on breaking down development and operations silos. \n\nDevOps has continued to grow in popularity since that time, from small businesses to enterprises with legacy systems and nearly every size company in between. Like almost anything else, DevOps can adapt to an organization’s unique needs and environment, adjusting to what’s most important to the business. 
\n\nAs such, it’s possible to find many different flavors of DevOps, though, at their core, each has the following 4 must-know principles in place:\n\n### Automation of the software development lifecycle\n\nThe North Star for all DevOps teams is automation. Before DevOps, software development was a very manual effort requiring human involvement (and physical handoffs) at every stage of the process. All of this human involvement meant companies were lucky to update or release new code once a year, and many were on an 18- or 24-month release cadence. \n\nToday so-called [“elite DevOps teams”](/blog/how-to-make-your-devops-team-elite-performers/) release code many times a day – and they’re able to do that largely because of automation. \n\nTo understand the power and importance of automation in DevOps, consider software testing, an often overlooked and unappreciated step that is regularly scapegoated for causing release delays. There’s no question software testing is critical; without testing companies risk releasing broken or actually even unsafe code. \n\nBut testing is perhaps the most hands-on of all the steps in DevOps, requiring people to write test cases, run myriad tests, analyze the results, and then circle back with developers for fixes. That’s all a long way of saying [there’s a reason teams point to testing](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) as the number one reason code isn’t released on time.\n\nEnter automation and the idea that the most basic software tests could happen as the code is written. Test automation dramatically speeds up the entire process and frees software testers to look for potentially more damaging code quality issues. \n\nAlthough testing is one of the most dramatic automation “wins” in DevOps, it’s far from the only one. 
[Continuous integration](/topics/ci-cd/) automates the process of moving new code into existing code, while [continuous deployment](/blog/how-to-keep-up-with-ci-cd-best-practices/) helps automate releases. And [Infrastructure as Code](/topics/gitops/infrastructure-as-code/) makes it easy to automate the process of provisioning developer environments. \n\n### Collaboration and communication\n\nA good DevOps team has automation, but a top-notch DevOps team also has collaboration and communication. The basic idea of bringing dev and ops together (as well as sec, test, stakeholders, etc.) hinges on teams being able to collaborate. And that can’t happen if there isn’t clear and regular communication.\n\nThis sounds like a deceptively simple principle of DevOps, but, like most things, the devil is in the details. Devs want to write code and move it along into the world; ops professionals focus on tools, compliance, and the cloud; and the security team wants to ensure the code is safe. Dev, ops, and sec don’t have the same priorities, might not speak the same “language,” and are likely to approach problem-solving from very different perspectives. A case in point: [Dev and sec still don’t really get along](/blog/developer-security-divide/), in part because the communication and collaboration gap remains wide.\n\nIt takes effort to bring teams together and often [out-of-the-box thinking](/blog/want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together/). 
And in one of those \"chicken and egg\" situations, teams need to communicate for successful DevOps, but DevOps itself can lead to better communication, and happier developers, according to findings in our [2021 Global DevSecOps Survey](/developer-survey/).\n\n### Continuous improvement and minimization of waste\n\nLeaning heavily on earlier software development methodologies, including [Lean](https://searchsoftwarequality.techtarget.com/definition/lean-programming) and Agile, DevOps also focuses on reducing waste and continuous improvement. Whether it’s automating repetitive tasks like testing so as not to waste time, or reducing the number of steps required to release new code, well-functioning DevOps teams continue to measure performance metrics to determine areas that need improvement. \n\nExpect teams to strive [to continuously improve release times](/blog/why-improving-continuously-speeds-up-delivery/), reduce the [mean-time-to-recovery](https://pipelinedriven.org/article/devops-metric-mean-time-to-recovery-mttr-definition-and-reasoning), and number of bugs found, in addition to a number of other metrics. \n\n### Hyperfocus on user needs with short feedback loops\n\nThe final must-know DevOps principle is the importance of bringing the actual user into every step of this process. Through automation, improved communication and collaboration, and continuous improvement, DevOps teams can take a moment and focus on what real users really want, and how to give it to them. There’s no question that finding a way into the user mind is quite tricky, and [teams can struggle to implement processes](/blog/journey-to-the-outer-loop/) to achieve this. \n\nThe other difficult piece of this is that user feedback, once gathered, must be delivered quickly so time isn’t wasted. That’s why short feedback loops are critical, and why teams need to [put energy into making them even shorter](/blog/journey-to-the-outer-loop/) as time goes on. 
\n\n## Benefits of a DevOps model and practices\n\nWhat happens if teams do DevOps right? In our 2021 survey, 60% of developers told us they were releasing code at least 2x faster, thanks to DevOps. Other benefits of a DevOps model include improved code quality, faster time to market, and better planning. \n\nAnd for bonus points, survey takers told us that having a successful DevOps practice also made for happier developers, and there’s [scientific data](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/) that shows happier devs are more productive. \n\n## What are some challenges of implementing DevOps?\n\nDevOps can be challenging in the beginning, particularly if it’s the first time being implemented within an organization. Here are some of the challenges of implementing DevOps. \n\n* **Breaking down the silos.** It may be difficult to break the mentality of development and operations being separate entities. Gather a basic understanding of the roles and responsibilities of a combination DevOps team.\n\n* **Understanding the jargon.** DevOps comes with a lot of shorthand, tech jargon, and SO many acronyms, like CI/CD. Take some time to study and remember that learning on the go is entirely normal and acceptable. \n\n* **Migrating from legacy software.** DevOps can be especially tricky for teams trying to migrate legacy software. Many tools designed for DevOps don’t work with mainframes (as one example) and integrations can be challenging even for experienced DevOps pros. It also doesn’t help that there’s a shortage of mainframe and other legacy programmers.\n\n* **Too many tools.** Teams spend so much time integrating and maintaining tools it’s getting in the way of actually developing, releasing and monitoring code. This is commonly known as the “toolchain tax.”\n\n* **Taming the learning curve frustration.** DevOps is complicated, and learning how it works is a marathon, not a sprint. 
Practice patience and grace with the team as implementation goes forward and rely on any resources available, such as help documentation and platform representatives – and sometimes plain old trial and error.\n\n## How to get started with DevOps in your organization\n\nWhen preparing to get started with DevOps, the following preparation is required:\n\n1. Map out the goals behind DevOps implementation.\n2. Clarify the roles and responsibilities of each stakeholder involved.\n3. Start basic and grow with experience.\n4. Plan to automate as much as possible.\n5. Plan your toolchain (and remember, toolchains can always be altered).\n6. Set up regular progress checkpoints.\n7. Be prepared to constantly iterate (but after giving something enough time to prove that iterating is necessary). \n\n## What is the future of DevOps?\n\nDevOps adoption and success experienced an enormous “jumpstart” thanks to the global pandemic. Teams moved past some of the cultural “how do we work together?” issues and matured into the “how do we adopt the right technologies?” mindset, based on results from our survey. Use of advanced technologies, including Kubernetes, [DevOps platforms](/topics/devops-platform/), and artificial intelligence (AI)/machine learning (ML) give hints as to what the future of DevOps looks like. 
\n\nIt’s safe to expect increased automation, smarter AI/ML-powered decision making (starting in places like [code review](/blog/the-road-to-smarter-code-reviewer-recommendations/)) and a more thoughtful choice of tools, such as continuing adoption of DevOps platforms to streamline the process.",[9,749,231],"collaboration",{"slug":751,"featured":6,"template":686},"4-must-know-devops-principles","content:en-us:blog:4-must-know-devops-principles.yml","4 Must Know Devops Principles","en-us/blog/4-must-know-devops-principles.yml","en-us/blog/4-must-know-devops-principles",{"_path":757,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":758,"content":764,"config":772,"_id":774,"_type":14,"title":775,"_source":16,"_file":776,"_stem":777,"_extension":19},"/en-us/blog/5-code-review-features",{"title":759,"description":760,"ogTitle":759,"ogDescription":760,"noIndex":6,"ogImage":761,"ogUrl":762,"ogSiteName":670,"ogType":671,"canonicalUrls":762,"schema":763},"How GitLab's 5 new code review features will make life easier","Code reviews are hard to get right. Here are five new features in our DevOps Platform designed to streamline code reviews and provide vital context.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667400/Blog/Hero%20Images/lagos-techie-unsplash.jpg","https://about.gitlab.com/blog/5-code-review-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab's 5 new code review features will make life easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2021-09-09\",\n      }",{"title":759,"description":760,"authors":765,"heroImage":761,"date":767,"body":768,"category":769,"tags":770},[766],"Brendan O'Leary","2021-09-09","\n_This is the second in a series of blog posts looking at the challenges of code review and the ways a DevOps platform can help. 
Read the [first post](/blog/the-code-review-struggle-is-real-heres-what-you-need-to-know/)._\n\n## What is a code review, and why is it important?\n\nCode review can be one of the most deceivingly difficult things in delivering software faster. Given the high stakes involved, we've made some key additions to our DevOps Platform that focus on making the code review process as seamless and effective as possible. We believe the number one way to make code reviews effective is to provide context. \n\nToo often we think of [code review tool features](/topics/version-control/what-are-best-code-review-tools-features/) as only \"reading\" and commenting on others' code - but what a good code reviewer does is understand the entire context of the proposed change. Context-driven code reviews should include factors like the issue that spurred on the change, how the change impacts non-obvious things like code quality, security, and performance, and whether the code is maintainable after the change is in place.\n\n## Simplifying code reviews\n\nGiven all of that, we made the merge request the central point of change management and it's one of the key benefits of a DevOps Platform. Using a merge request allows code submitters and reviewers alike to have all of the information required to make the right decisions about a particular change. Making sure that everyone has the same information, and is as informed as possible about how a change will impact the project over all, leads to code reviews that are both quicker and more effective.  \n\nOver the last year we've added five features that help ease the code review pain. Here's a look at all of them:\n\n### 1) Meeting you where you are\n\nSome of the biggest code review changes involve meeting folks where they are - and allowing for a more natural feeling code review. As engineers, we spend most of our days glued to our IDE of choice. 
And we're used to code not just being static words on a screen, but also interacting and running that code to check its performance and outputs. That's why GitLab has brought a truly integrated experience to your development environment.\n\n**[Here's how to get started with a [DevOps platform](/topics/devops-platform/)]**\n\nIf you use Visual Studio Code as your main development environment like I do, you can now [view merge requests directly in VSCode](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow). In addition, you can [comment and see comments](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/342) in that view as well as [checkout the branch directly from VSCode](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/63). This familiar environment gives you all the benefits of GitLab MRs - CI/CD, security scanning, approval workflows - without having to leave your own development environment. \n\nBut what if you're not at your development box? Or you don't have this particular library or project installed and running locally?  Well there's a great solution for that - [Gitpod](https://gitpod.io) - and it also integrates directly with GitLab.  Gitpod allows you to have a working, containerized development environment in seconds. And now with GitLab 14.2, you can [launch a Gitpod workspace directly from the GitLab merge request](https://www.gitpod.io/blog/gitlab-mr-gitpod-integration).  
That means with one button in GitLab you can go from a static code review into a running application with all of the proposed changes.\n\n### 2) Code quality notices built into the MR diff\n\nGitLab already brings [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html), [security](https://docs.gitlab.com/ee/user/application_security/), [performance](https://docs.gitlab.com/ee/ci/testing/load_performance_testing.html), and [other metrics](https://docs.gitlab.com/ee/ci/testing/metrics_reports.html) directly into the merge request. But in GitLab 13.12, we also added the ability to see [code quality notices directly in the MR diff](https://docs.gitlab.com/ee/ci/testing/code_quality.html). This means that changes to code quality are presented right next to the offending code, making it quick and easy for reviewers to make suggestions about how to keep code quality top notch while shipping changes.\n\n![Code quality notice shown in-line with merge request diff](https://about.gitlab.com/images/blogimages/code_quality_mr_diff_report_v14_2.png)\n\n### 3) File-by-file reviews\n\nSometimes with changes it is nice to use the file explorer view and be able to see changes across multiple files. Other times you might want to do a thorough pass on *every* file to ensure you didn't miss anything. Toggling between seeing all of the changed files and one file at a time is a small but valuable feature that makes code reviews easier.\n\n![Animated image showing changing between show all and show one file at time view in a merge request](https://about.gitlab.com/images/blogimages/animated-single-file-review-example.gif)\n\n### 4) Check off a file as reviewed\n\nSpeaking of small but powerful features, one of my favorite features is something many would consider incredibly small.  
But to that I would say - there are no small features, only small merge requests 😄!\n\n**[How to [get the most out of your DevOps platform](/topics/devops/seven-tips-to-get-the-most-out-of-your-devops-platform/)]**\n\nThe ability to check off files as reviewed has become a natural part of my code review workflow - even when the code I'm reviewing might be code I wrote myself! It allows me to focus more of my review time on the biggest impact changes, ignoring smaller changes or ones that don't directly impact the biggest concerns in a review. And in every review session I use it to make sure I've ACTUALLY reviewed every file...not that any reviewer would ever leave one out 😉.\n\n![Viewed check box checked and a file hidden as already reviewed](https://about.gitlab.com/images/blogimages/filed-viewed-merge-request.png)\n\n### 5) Reviewers vs. Assignee\n\nThe last improvement to code review in our DevOps Platform is the addition of \"reviewers\" as an option in a merge request, alongside the existing choice of \"assignee.\" This can help speed up code reviews by ensuring all team members who have to sign off on a merge request are informed and consulted while also making sure there is a clear responsibility on who will take the next action on a merge request, or be the one to actually click the \"merge when pipeline succeeds\" button.\n\nWe hope your teams will try these new and improved DevOps Platform code review features - and we're not done yet.  We'll be shipping improvements and updates to the code review process all of the time. 
And because everyone can contribute you can add your own ideas and suggestions into our DevOps Platform to make code reviews less painful and more effective.\n","devsecops",[771,9],"code review",{"slug":773,"featured":6,"template":686},"5-code-review-features","content:en-us:blog:5-code-review-features.yml","5 Code Review Features","en-us/blog/5-code-review-features.yml","en-us/blog/5-code-review-features",{"_path":779,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":780,"content":786,"config":794,"_id":796,"_type":14,"title":797,"_source":16,"_file":798,"_stem":799,"_extension":19},"/en-us/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd",{"title":781,"description":782,"ogTitle":781,"ogDescription":782,"noIndex":6,"ogImage":783,"ogUrl":784,"ogSiteName":670,"ogType":671,"canonicalUrls":784,"schema":785},"5 Teams that made the switch to GitLab CI/CD","See what happened when these five teams moved on from old continuous integration and delivery solutions and switched to GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678657/Blog/Hero%20Images/ci-cd-competitive-campaign-blog-cover.png","https://about.gitlab.com/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Teams that made the switch to GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-04-25\",\n      }",{"title":781,"description":782,"authors":787,"heroImage":783,"date":789,"body":790,"category":791,"tags":792},[788],"Chrissie Buchanan","2019-04-25","\nNo team is immune to process challenges, and as organizations grow these challenges only get worse. Sometimes there’s a lack of visibility during the development process, sometimes legacy systems create instability and lack functionality, and sometimes things just _stop working_. 
Continuous integration and delivery [(CI/CD)](/topics/ci-cd/) enables teams to deploy faster, and finding the right tool can make a big difference in the development lifecycle. Great companies know how to identify problems and when it’s time to find better solutions.\n\nWe’ve previously shared [why teams love GitLab CI/CD](/blog/why-gitlab-ci-cd/), and now we want to highlight five real-world examples of teams that abandoned dated continuous integration and delivery solutions and made the switch to GitLab CI/CD. We’ll show you how they:\n\n*   Reduced costs.\n*   Deployed faster.\n*   Improved efficiency.\n*   Made engineers’ lives easier.\n\n### Verizon Connect\n\nThe Verizon Connect Telematics Container Cloud Platform team had several challenges: too many tasks, disjointed processes, and outdated, Java-based monolithic applications. Add tools like [BitBucket](/competition/bitbucket/), Jenkins, and Jira in the mix and the Verizon Connect team was struggling with _data center builds that took nearly 30 days_. It was time to start from scratch.\n\nThe team chose GitLab to support this infrastructure initiative and reduced data center deploys from 30 days to _under eight hours_.\n\n[Read on](/blog/verizon-customer-story/)\n{: .alert .alert-gitlab-purple}\n\n### Ticketmaster\n\nFor the Ticketmaster mobile team, a two-hour pipeline for a minor change was the last straw. 
After years with Jenkins and a system weighed down by plugins and legacy development, they knew they needed to reevaluate their continuous integration and delivery tools.\n\nAfter adopting GitLab CI/CD, Ticketmaster was able to move to weekly releases, decreasing their pipeline execution time from two hours to _only eight minutes_ to build, test, and publish artifacts.\n\nLearn how GitLab CI/CD gave the mobile team their Friday afternoons back.\n\n[Read more](/blog/continuous-integration-ticketmaster/)\n{: .alert .alert-gitlab-purple}\n\n### HumanGeo\n\nAs a software development company, HumanGeo ships a lot of code. Development speed is vital, and when Jenkins CI became yet another thing to manage, they needed to make a change.\n\nJustin Shelton, an engineer at HumanGeo, talks about why they decided to switch to GitLab CI/CD, and how they were able to:\n\n*   Cut admin time by 96 percent.\n*   Cut costs by 33 percent.\n*   Increase the pace of development.\n\n[Learn how](/blog/humangeo-switches-jenkins-gitlab-ci/)\n{: .alert .alert-gitlab-purple}\n\n### Wag!\n\nIn three years, Wag! has supported more than one billion walks through its on-demand dog walking, sitting, and boarding mobile app. The engineering team was searching for a simplified solution that would streamline the development process. The company had been using Travis and other continuous integration and delivery systems but wanted something with a better interface that offered more control.\n\nWag!'s infrastructure engineers no longer have to manually stage and test their work. They now use the full GitLab CI/CD pipeline – so whether it's the Android application, the web application, the API, or infrastructure, it's all being tested, built, and deployed through GitLab.\n\n[Check it out](/blog/wag-labs-blog-post/)\n{: .alert .alert-gitlab-purple}\n\n### Paessler AG\n\nPaessler AG’s PRTG Network Monitor is used by enterprises and organizations of all sizes and industries across more than 170 countries. 
It’s critical that their monitoring service is able to keep up with developments but stability issues meant that sometimes things just stopped working.\n\nThe Paessler team initially chose GitLab for version control, but after seeing the functionality and potential of GitLab pipelines, they decided to replace Jenkins as well. Since adopting GitLab CI/CD, the Paessler AG team now has 4x more releases and 90 percent of QA self-served.\n\n[Read the case study](/customers/paessler/)\n{: .alert .alert-gitlab-purple}\n\nWant to know what GitLab CI/CD could do for your team? You’re invited to join us for our CI/CD webcast, _Mastering continuous software development_. Learn how GitLab’s built-in CI/CD helps teams apply continuous software development without all the complicated integrations and plugin maintenance.\n\nIn this webcast, we’ll cover:\n\n* Three main approaches to the continuous software development methodology.\n* The benefits of continuous integration, delivery and deployment practices.\n* A demonstration of GitLab’s CI/CD pipeline to build, test, deploy, and monitor your code.\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nWatch GitLab's [Mastering continuous software development](/webcast/mastering-ci-cd/) webcast\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n","engineering",[793,9,109],"customers",{"slug":795,"featured":6,"template":686},"5-teams-that-made-the-switch-to-gitlab-ci-cd","content:en-us:blog:5-teams-that-made-the-switch-to-gitlab-ci-cd.yml","5 Teams That Made The Switch To Gitlab Ci 
Cd","en-us/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd.yml","en-us/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd",{"_path":801,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":802,"content":808,"config":815,"_id":817,"_type":14,"title":818,"_source":16,"_file":819,"_stem":820,"_extension":19},"/en-us/blog/5-ways-collaboration-boosts-productivity-and-your-career",{"title":803,"description":804,"ogTitle":803,"ogDescription":804,"noIndex":6,"ogImage":805,"ogUrl":806,"ogSiteName":670,"ogType":671,"canonicalUrls":806,"schema":807},"5 ways collaboration boosts productivity and your career","Collaboration is a powerful tool and DevOps pros that learn how to master it will expand their growth opportunities.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668473/Blog/Hero%20Images/john-schnobrich-FlPc9_VocJ4-unsplash.jpg","https://about.gitlab.com/blog/5-ways-collaboration-boosts-productivity-and-your-career","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 ways collaboration boosts productivity and your career\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-05-02\",\n      }",{"title":803,"description":804,"authors":809,"heroImage":805,"date":811,"body":812,"category":813,"tags":814},[810],"Sharon Gaudin","2022-05-02","\n\nA lot of DevOps professionals might feel confident they’ve got a lock on their DevOps role. They don’t need anyone else chiming in on how to update a software feature or plan a new product.\n\nOther DevOps pros want to focus on learning new programming languages or figuring out how best to use machine learning. They think they don’t have time for so-called soft skills like [communication and collaboration](/blog/six-key-practices-that-improve-communication/).\n\nAt its heart, DevOps is collaboration. It’s a team sport. 
Of course, staying sharp with hard skills like machine learning, new programming languages, and other cutting-edge technology is fantastic, but don’t ignore soft skills. Enabling teamwork is a [cornerstone of DevOps](/blog/4-must-know-devops-principles/).\n\nMaking this cultural shift means teammates are all pulling in the same direction. It means more, and more diverse, input leads to better, well-rounded products and software. And it also means career development.\n\nHere’s a look at just a few ways a [culture of collaboration](/blog/collaboration-communication-best-practices/) can benefit a DevOps team, software development and deployment, and DevOps professionals’ careers.\n\n## Boosting DevOps professionals’ careers\n \nIt’s clear that [companies are increasingly dependent](/blog/the-top-skills-you-need-to-get-your-devops-dream-job/) on DevOps professionals who are able to not only work with various teams, but who also are able to clearly communicate with colleagues in other departments, like finance and marketing. The [2021 Global DevSecOps Survey](https://learn.gitlab.com/c/2021-devsecops-report?x=u5RjB_) reported that IT professionals working in development, security, and operations all said they need better communication and collaboration skills for their future careers. And nearly 23% said these soft skills will give the biggest boost to their careers. Being able to work across departments, clearly communicate needs and ideas, and work together to innovate better products makes a tech person more valuable to the overall company, leading to [management roles and higher salaries](/blog/soft-skills-are-the-key-to-your-devops-career-advancement/).\n\n## Using the buddy system to iterate faster\n\nThe bedrock of a [DevOps culture is collaboration and joint responsibility](/blog/if-its-time-to-learn-devops-heres-where-to-begin/). 
And for good reason, because better cooperation leads to more, and more efficient, continuous, iterative development and feature deployment. Cooperation makes a DevOps team more agile so it can adapt to changes in projects and workloads. \n\nWith traditional application development, managers, developers, security professionals, and those on the operations team generally work in silos. They don’t communicate readily or well. They don’t work together on projects or share documentation and knowledge. With DevOps, though, those silos begin to be broken down. And by joining forces, teammates can pool efforts to assess problems, envision solutions, and create and deploy high-quality applications from a single end-to-end application. Breaking down those silos fosters better decision-making and creativity, and increases information and resource sharing. \n\n## Creating better products\n\nBy creating partnerships, DevOps teams are able to more quickly adjust to changing market needs and take on new competitors. Sharing data, and the workload, across disparate teams also empowers them to find out what customers need, what delights them, and the features that need to be created. More input from people with different perspectives and different backgrounds means a company will get software with features that speak to a wider range of users. All of this leads to better products, and that leads to a stronger business.\n\n## Pulling different business teams together\n\nFostering cooperation isn’t just for DevOps team members. A truly collaborative culture also should include colleagues in different parts of the company. Members of the security team, marketing, finance, customer service, and the C-suite all can participate to create better software. Collaboration between DevOps and security, for instance, leads to less duplication of effort, more secure software, and a more secure company. 
Similarly, someone in customer service would have direct insights into what users like, and don’t like, about current products. Integrating their feedback into ongoing processes will provide the ability to leverage their expertise before code is delivered. \n\n## Taking advantage of everyone’s expertise\n\nBy inviting discussion, input, and assistance from experienced and new team members, as well as from people in different business departments, a culture can be built around learning from and relying on others’ expertise. This enables DevOps professionals to discover other perspectives and contribute beyond what might have once been a narrow focus. Think of everyone’s knowledge and experience as pieces of a shared resource library that can be tapped into for every project.\n \n\n","careers",[9,813,749],{"slug":816,"featured":6,"template":686},"5-ways-collaboration-boosts-productivity-and-your-career","content:en-us:blog:5-ways-collaboration-boosts-productivity-and-your-career.yml","5 Ways Collaboration Boosts Productivity And Your Career","en-us/blog/5-ways-collaboration-boosts-productivity-and-your-career.yml","en-us/blog/5-ways-collaboration-boosts-productivity-and-your-career",{"_path":822,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":823,"content":829,"config":835,"_id":837,"_type":14,"title":838,"_source":16,"_file":839,"_stem":840,"_extension":19},"/en-us/blog/5-ways-to-bring-devops-to-your-campus",{"title":824,"description":825,"ogTitle":824,"ogDescription":825,"noIndex":6,"ogImage":826,"ogUrl":827,"ogSiteName":670,"ogType":671,"canonicalUrls":827,"schema":828},"5 ways to bring DevOps to your campus","Educators can give students a career advantage by collaborating with GitLab to bring DevOps lectures, tools, and community straight to the classroom.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668161/Blog/Hero%20Images/armycyberschool.jpg","https://about.gitlab.com/blog/5-ways-to-bring-devops-to-your-campus","\n                      
  {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 ways to bring DevOps to your campus\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"PJ Metz\"}],\n        \"datePublished\": \"2022-01-11\",\n      }",{"title":824,"description":825,"authors":830,"heroImage":826,"date":832,"body":833,"category":769,"tags":834},[831],"PJ Metz","2022-01-11","\nOrganizations around the world and across industries are adopting the DevOps methodology where development and operations are blended to securely accelerate software delivery. As this approach becomes a mainstay of software development, companies will need skilled professionals to fill key DevOps roles. Yet, as with most technological change, educational opportunities often lag behind real-world applications.\n\nGitLab aims to change this and has developed five ways educators can bring DevOps instruction and our [DevOps Platform](/solutions/devops-platform/) to your campus, affording students, professors, researchers, and IT teams the unique opportunity to learn DevOps skills firsthand, including DevSecOps, and offering your graduates and organization a competitive advantage.\n\n**1. GitLab for Education program**\n\n[GitLab for Education](/solutions/education/) provides free licenses of Ultimate to educational and research institutions as long as it is used for teaching or nonprofit research purposes. If you’re going to use GitLab in a classroom and want your students to use it for their schoolwork, then this is the option for you. This is set up by a full-time employee of the university and is a full license with as many seats as you need. Our Ultimate license is everything that our major enterprise customers use to create their apps and now it’s available to university students across a variety of disciplines. Signing up is simple via our [join page](/solutions/education/join). 
This kicks off a process that takes a few weeks to complete and ends with a license that brings your classroom or research into the world of the DevOps Platform. Learn how GitLab for Education has benefited other institutions, including the [University of Washington](/customers/uw/), [Dublin City University](/customers/dublin-city-university/), and Heriot Watt University.\n\n**2. GitLab for Campuses**\n\nGitLab for Campuses lets your developers, IT professionals, and other employees working with the technical administration of your university have access to world-class DevOps tools. Rather than cobbling together multiple applications for a Do-It-Yourself DevOps solution, we can provide you access to our single DevOps platform at a discounted rate. [GitLab for Campuses](/solutions/education/) is an offering that covers a large swath of your user base. You would still be able to grant access to students on your campus to use GitLab just like you can with GitLab for Education, but you have the added benefit of The DevOps Platform being used for running your entire institution’s IT.\n\n**3. GitLab Guest Lecture**\n\nDevOps might be a brand-new consideration for your classroom; perhaps this is your first time hearing about it. GitLab’s education team is here to help you by providing a DevOps 101 guest lecture, which you can schedule for your class. We can have a lecture during one of your sections or set up a time for multiple sections of your class to come together and learn about what DevOps is and how to learn more. This type of industry information is invaluable for students looking to join a company right out of college. We’re not just talking about The DevOps Platform, but DevOps as an operational and cultural change in software development, as well as how DevOps implementations can be present in non-CS careers and companies. Let our team of former educators help guide your class into the exciting world of DevOps with a guest lecture. 
[Fill out this form](https://forms.gle/y2r5o83i8z6rfJPh8) to find out more about our Guest Lecture opportunities.\n\n**4. GitLab Student Contribution Workshop**\n\nContributing to open source is one of the best ways students can build skills, make connections, and add to their portfolio to showcase their abilities and work. Open source is everywhere in DevOps, especially at GitLab. Not only are several open source projects hosted on GitLab, GitLab itself is open core and [open for contributions](/community/contribute/).\n\nWe believe [everyone can Contribute](/company/mission/#mission), but we know that the first contribution can be daunting; students might not know where to start, how to create a merge request, or what the maintainers are looking for. Even basics like working locally and git commands might be a little confusing if students haven’t encountered them before. One way for students to  participate is through our hackathon. GitLab hosts a [hackathon](/community/hackathon/) once every three months with helpful issue tags and other ways to easily find places where we are looking for contributors to help build the future. Past hackathons have included swag prizes for every merge request that gets merged as well as a top-tier prize for the most contributions. Top contributors to GitLab are also eligible for our [Heroes program](/community/heroes/).\n\nBecause we believe so strongly in the power and importance of open source, we are offering a Contribution Workshop where a GitLab team member will walk students through some of the ways they can contribute to open source on GitLab. If your class, student organization, or large group of students wants to learn more about open source contributions, [contact us](https://docs.google.com/forms/d/e/1FAIpQLSe8yQkCMjylb-9w3WZoz3tmN7hmhnrb2LRoXWJd6D5ncP_o6Q/viewform?usp=sf_link). We’ll take it from there and bring open source to your campus.\n\n**5. 
GitLab Student Organization Workshop**\n\nStudent organizations still remain one of the best ways to build community among future professionals and make connections that can be vital to a student’s career. Meeting others working or studying in the same field also promotes sharing of information and resources to create an environment that maximizes potential success for everyone. These organizations, like on-campus clubs, professional fraternities and sororities, and even professional organizations with student chapters, are a great way to start building your future with your peers. GitLab is looking to bring a small workshop to student organizations at your university where you’ll not only learn how to use GitLab and start using DevOps best practices, but you’ll also boost some coding skills by actually building with GitLab. We’ll be offering a workshop in Python or Node.js where we will learn to build either a Twitter bot or a Discord bot.\n\nBecause of the ever-evolving presence of Covid and travel complications, we can’t promise this workshop will be in person. As of right now, the safest way is to get together virtually. If you are a member of a student organization looking to give your members an opportunity to learn more about DevOps, GitLab, or a fun project to make a bot and level up some skills, then this is the workshop for you. 
[Sign up here](https://docs.google.com/forms/d/e/1FAIpQLSecpQ1tmFpAPeeT9rasWcAtaEF8nv62LEDsKyJEdJJbe5Z8RQ/viewform?usp=sf_link)\n\n\n",[9,813,267],{"slug":836,"featured":6,"template":686},"5-ways-to-bring-devops-to-your-campus","content:en-us:blog:5-ways-to-bring-devops-to-your-campus.yml","5 Ways To Bring Devops To Your Campus","en-us/blog/5-ways-to-bring-devops-to-your-campus.yml","en-us/blog/5-ways-to-bring-devops-to-your-campus",{"_path":842,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":843,"content":849,"config":856,"_id":858,"_type":14,"title":859,"_source":16,"_file":860,"_stem":861,"_extension":19},"/en-us/blog/6-tips-to-make-software-developer-hiring-easier",{"title":844,"description":845,"ogTitle":844,"ogDescription":845,"noIndex":6,"ogImage":846,"ogUrl":847,"ogSiteName":670,"ogType":671,"canonicalUrls":847,"schema":848},"6 tips to make software developer hiring easier","If your developers are leaving and it's tough to hire, here's our best advice to stem the tide. One hint: A DevOps Platform can help!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668486/Blog/Hero%20Images/why-you-should-join-the-gitlab-security-team.jpg","https://about.gitlab.com/blog/6-tips-to-make-software-developer-hiring-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"6 tips to make software developer hiring easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-11-09\",\n      }",{"title":844,"description":845,"authors":850,"heroImage":846,"date":852,"body":853,"category":769,"tags":854},[851],"Valerie Silverthorne","2021-11-09","\nMastering software developer hiring has never been more critical – or more difficult.\n\nIn fact, it’s almost the perfect storm: There’s a global and growing shortage of developers; voluntary job turnover rates in the US are 25% (almost double what they were three 
years ago); and demand for skilled engineers is expected to outstrip supply by 1.2 million in three years, according to the Bureau of Labor Statistics. \n\nAt the same time, what developers want, or will settle for, has changed, perhaps at least partially driven by the pandemic. Developers want meaningful challenges, a flexible work-life balance, tools and processes that don’t slow them down, and, increasingly, the option to work completely remotely.\n\nSo what can you do to keep your developers from leaving and make software developer hiring easier? Here’s our best advice:\n\n* Understand the developer mentality. “Of all the tech roles, developers are the most fickle,” says GitLab’s staff developer evangelist [Brendan O’Leary](/company/team/#brendan), who, with nearly 20 years experience as a developer, is in a very good position to know. “They don’t want to put up with a lot and tend to have strong opinions.” \n\n* Stop measuring. It is possible to tie developer productivity to results, and not a mandatory 40-hour work week. How do we know this? Because that’s how GitLab operates, by [measuring results and not hours spent](https://handbook.gitlab.com/handbook/values/#results). “Companies need to stop measuring knowledge workers, like developers, by the hours they spend,” O'Leary says. “That’s the worst thing you could do.” Instead, build a culture that values paid time-off, family leave, and other work-life balance efforts because those will resonate with developers, he stresses. \n\n* Up your tool game. The [science has spoken](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/) and developers drowning in information overload aren’t as productive, which ties directly into job satisfaction and happiness. In our [2021 Global DevSecOps Survey](/developer-survey/), we heard a lot about tool chains with between five and 15 tools on them, and often there wasn’t just one tool chain in play, but several. That’s a lot of noise. 
A [DevOps platform](/solutions/devops-platform/) streamlines code development, testing, deployment, and monitoring and definitely improves a company’s ability to successfully do DevOps. When we asked respondents to tell us in their own words about the benefits of a DevOps platform, this comment summed it up:  “Reduced mean time to recovery (MTTR), quicker time to market, reduced lead time for fixes, and fewer change failures.” \n\n* Embrace flexible work. Nearly two years into a global pandemic, the time is right for companies to be deliberate about their choices. Developers are going to choose employers who have thought through all the options, whether it’s fully remote, remote with flexibility, or other combinations. But they’re not going to settle for companies trying to patchwork it without a solid plan. “Not every company is losing developers,” O'Leary says. “Developers are going to the places that understand the flexibility in life that can come from remote, while also not sacrificing any productivity.”\n\n* Don’t forget “concrete” perks. Free soda and “bring your dog to work” days probably aren’t enough to make developers consider your team, or decide to stay long term. Focus on what matters: time for volunteer or side projects, a collaborative culture, and demonstrable recognition of success. We offer [discretionary bonuses](/handbook/incentives/#discretionary-bonuses). Some companies send handwritten notes from senior leadership, while others meet monthly for group celebrations. Whatever you do, just make it sure it’s authentic.\n\n* Assess the skills gap. You’ve got unfilled roles and DevOps team members itching for a change. Why not marry the two?  
West Monroe, a Chicago-based technology consulting firm, found 56% of managers surveyed rated their organization’s skills gap [as moderate to severe](https://www.westmonroe.com/perspectives/signature-research/the-upskilling-crisis-effectively-enabling-and-retraining-employees-for-the-future?utm_source=google&utm_medium=cpc&utm_term=upskilling&utm_content=!acq!v3!118035700243_kwd-333379491008__501805835687_g_c__&utm_campaign=Search%3A+Prospecting%3A+BA%3A+Priority+Content%3A+Gated%3A+Tier+3_BBM&atrkid=V3ADWED098667_118035700243_kwd-333379491008__501805835687_g_c___&gclid=CjwKCAjw7--KBhAMEiwAxfpkWF1Xg74_9zydAzfcJLt0t90OMh7MYsyV3yOfwK4bJWt-OBX1BzW2mRoClv4QAvD_BwE). And a survey from the McKinsey Quarterly discovered 53% of executive respondents felt [reskilling](https://www.mckinsey.com/business-functions/mckinsey-accelerate/our-insights/five-fifty-the-skillful-corporation?cid=fivefifty-eml-alt-mkq-mck&hlkid=a7a8ae1b68574d02b81db1f1eeb8fd8d&hctky=12428831&hdpid=8233aa33-5ff4-4450-a4c7-2f47dfeaf9d0) was the best solution to the skills gap. So stand out from the crowd and offer solid learning paths to employees, as well as tuition reimbursement. At the very least, offer your DevOps team time for [DIY learning](/blog/best-advice-for-your-devops-career-keep-on-learning/), as needed. Also consider [job swapping](https://www.managersorbit.com/job-swapping-benefits/), which can be a great way to expose employees to new career opportunities.\n \n_Sharon Gaudin contributed to this blog post._\n\n\n## Read more on DevOps careers: \t\t\n\n- [Best advice for your DevOps career? Keep on learning](/blog/best-advice-for-your-devops-career-keep-on-learning/)\n\n- [Four tips to increase your DevOps salary](/blog/four-tips-to-increase-your-devops-salary/)\n\n- [DevOps salaries in 2021: Where do you rank?](/blog/a-look-at-devops-salaries/)\n\n- [Have DevOps jobs to fill? 
Try these 3 strategies to hire and retain](/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain/)\n\n\n\n",[813,9,855],"agile",{"slug":857,"featured":6,"template":686},"6-tips-to-make-software-developer-hiring-easier","content:en-us:blog:6-tips-to-make-software-developer-hiring-easier.yml","6 Tips To Make Software Developer Hiring Easier","en-us/blog/6-tips-to-make-software-developer-hiring-easier.yml","en-us/blog/6-tips-to-make-software-developer-hiring-easier",{"_path":863,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":864,"content":870,"config":876,"_id":878,"_type":14,"title":879,"_source":16,"_file":880,"_stem":881,"_extension":19},"/en-us/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform",{"title":865,"description":866,"ogTitle":865,"ogDescription":866,"noIndex":6,"ogImage":867,"ogUrl":868,"ogSiteName":670,"ogType":671,"canonicalUrls":868,"schema":869},"6 ways SMBs can leverage the power of a DevOps platform","Bringing a DevOps platform into a small business can be a game changer. It can also cut down on the hat wearing. Here are the top 6 benefits.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668224/Blog/Hero%20Images/inside-our-new-development-team-lead-persona.jpg","https://about.gitlab.com/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"6 ways SMBs can leverage the power of a DevOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-04-12\",\n      }",{"title":865,"description":866,"authors":871,"heroImage":867,"date":872,"body":873,"category":769,"tags":874},[810],"2022-04-12","\nA small or medium-sized business (SMB) or enterprise (SME) is likely working with a small staff but facing a big workload and even bigger expectations. 
Creating applications that will expand the customer base, keep up with a changing market, and take on competitors with deeper pockets can be daunting.\n\nIt’s possible to ease those burdens by choosing a single, end-to-end DevOps platform. Productivity will skyrocket and so will opportunities to [grow the company](https://page.gitlab.com/resources-ebook-smb-beginners-guide-devops.html).\n\nOf course, DevOps offers significant technical benefits, like testing and building at scale with [continuous integration and continuous delivery](/blog/how-to-keep-up-with-ci-cd-best-practices/), a shorter lead time with automated deployment, and [fewer production failures with earlier error detection](/blog/iteration-on-error-tracking/). But a DevOps platform also offers myriad business benefits to help support and expand a start-up or SMB.\n\nHere are six more ways a DevOps platform can help an SMB:\n\n## Improved customer satisfaction\n\nUsing a DevOps platform means iteration can happen faster. And that’s critical for SMBs that need to be able to quickly make changes to meet customer needs. DevOps also provides a way to [better monitor users’ feedback](/blog/cd-unified-monitor-deploy/) and makes it easier to respond with more speed and agility. And it reduces Change Failure Rates, increasing application reliability and stability.\n\nAll of this means SMBs will be more able to give clients what they want and need, all while creating an engaging customer experience. Closer customer ties create trust and keep users loyal to products. \n\n## Better security\n\nA DevOps platform embeds security to help seamlessly achieve a DevSecOps approach, a cornerstone of [incorporating security scanning early in the software development lifecycle](/blog/efficient-devsecops-nine-tips-shift-left/). By integrating testing and security reviews earlier in the process, and by using end-to-end automation, there are more opportunities to quickly and efficiently address any security issues. 
This reduces the time between designing new, higher-quality features and rolling them out into production. That's the beauty of a platform approach to DevOps – security isn't an afterthought. It’s part of the entire process.\n\nDevOps not only speeds production but creates more secure applications. And, simply put, more secure software makes for a more trusted product offering… and for happier, more satisfied customers.\n \n## True collaboration and innovation\n\nCollaboration is one of the basic tenets of DevOps. By [fostering communication and innovation](/blog/collaboration-communication-best-practices/), DevOps not only encourages developers and IT to work together, it also supports collaboration throughout the entire company. This is one area where SMBs have a huge advantage: With fewer employees, who also might be less set in their ways, collaboration and innovation are inherently more inclusive in a small business. [An SMB or start-up is never too small for DevOps](/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform/). By inviting discussion and assistance from all team members, DevOps creates a culture built around learning from and relying on others’ expertise; it also brings more ideas to the table. \n\n## Happier employees and better retention\n\nThe greatest resource a company has is its people. This is even more true for small companies where the pain of employee dissatisfaction and departure is felt even more acutely. Managers also don’t want projects waylaid because the people driving them are leaving.\n\nTo stop that from happening, it’s critical the workplace [keeps employees happy](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/). \n\nRetaining a tech team isn’t just about perks, like in-office meditation pods, cereal stations, and foosball tables. 
Companies also need to give developers the processes and tools they need to be efficient, add automation, and make it easier to find and fix security and compliance issues. A single, end-to-end DevOps platform offers a solution for all of those issues. In our [2021 Global DevSecOps Survey](/developer-survey/), more than 13% of respondents said DevOps makes developers happier or makes their team more attractive to potential new employees. \n\n## Improved decision-making\n\nSmall or medium-sized businesses may lack their larger competitors’ resources, but their agility helps them quickly turn a big idea into action that grows the customer base and profits. A DevOps platform has built-in processes and methods to help sustain an SMB’s agile advantage as it grows, so innovative ideas can scale more quickly and smoothly into products, and ultimately new lines of revenue. Automate more and with higher visibility to make fewer and better decisions.\n\n## Wear all the hats\n\nIt might be a cliche, but it’s also true: SMB employees have to wear all the hats. Code writing, customer service, trouble-shooting, accounts payable… SMB teams are masters at multitasking, but that’s not always the most productive way to be.\n\nA DevOps platform makes it [easier to reduce context-switching](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) and work cross-functionally because everyone is using the same tool. Built-in automation reduces the number of tasks that need to be done manually and aids in collaboration. \n\nAt the end of the day, a complete DevOps platform isn’t a shiny toy, it’s a critical SMB tool. Adopting a platform can make an SMB even more nimble, efficient, and able to scale. DevOps readies an SMB to take on bigger competitors with deeper pockets. 
And that will enable the business to become what its founders and executives envision.\n",[9,875,109],"security",{"slug":877,"featured":6,"template":686},"6-ways-smbs-can-leverage-the-power-of-a-devops-platform","content:en-us:blog:6-ways-smbs-can-leverage-the-power-of-a-devops-platform.yml","6 Ways Smbs Can Leverage The Power Of A Devops Platform","en-us/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform.yml","en-us/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform",{"_path":883,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":884,"content":890,"config":896,"_id":898,"_type":14,"title":899,"_source":16,"_file":900,"_stem":901,"_extension":19},"/en-us/blog/a-3-step-plan-for-devops-platform-migration",{"title":885,"description":886,"ogTitle":885,"ogDescription":886,"noIndex":6,"ogImage":887,"ogUrl":888,"ogSiteName":670,"ogType":671,"canonicalUrls":888,"schema":889},"A 3-step plan for DevOps platform migration","Too many tools = too much time wasted. Use our 3-step plan and detailed checklist to jumpstart a DevOps platform migration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668319/Blog/Hero%20Images/more-robust-task-lists.jpg","https://about.gitlab.com/blog/a-3-step-plan-for-devops-platform-migration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A 3-step plan for DevOps platform migration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lauren Minning\"}],\n        \"datePublished\": \"2022-08-25\",\n      }",{"title":885,"description":886,"authors":891,"heroImage":887,"date":893,"body":894,"category":769,"tags":895},[892],"Lauren Minning","2022-08-25","\n\nWhen making your DevOps platform migration plan, less really is more, at least when it comes to tools.\n\nOur [2022 Global DevSecOps Survey](/developer-survey/) found that not only do teams have _lots_ of tools, they spend a significant amount of time managing them. 
All told 40% of developers spend between one quarter and one half of their time on toolchain maintenance and integration, and another 33% spend between 50% and **all** of their time on this task. So it’s hardly a surprise that 69% of survey takers said they want to consolidate their toolchains.\n\nOne obvious way to consolidate is migrating to a DevOps platform. DevOps platform migration does take some planning and teamwork, but it can be done. Here’s a 3-step plan (and a self-evaluation checklist) to get teams started.\n\n## Choose the right path\n\nThe most important thing to know about migrating to an end-to-end DevOps platform is that everyone's needs are different so there isn’t one “right way” to carry out your migration.\n\nA company that has 1,000 users will have completely different DevOps needs than a company that has 5,000 users. What your specific DevOps platform migration plan requires will depend on the types of projects you migrate, the file types within those projects, and a whole host of other parameters. Because of this, there is not a “one size fits all” migration process for everyone to follow. \n\nHere’s a basic 3-step guide for migrating to a DevOps platform:\n\n**Begin by identifying** the strategic goals and be clear about why they are a priority for future business plans.\n\n**Evaluate tools** currently in use that no longer serve future goals. Ultimately the goal should be to operate entirely out of a single application for maximum efficiency. But it may make sense to migrate some things now and others down the line. \n\nThis is the time to become a historian and discern which tools have been problematic in the past. Consider what to migrate right away or later on and why (i.e., instability or costly maintenance and licensing) and really use that to inform the migration process. \n\n_An important note: Take into consideration the business disruption that migration has on a company. 
Replacing existing tools with a new DevOps platform in one step could mean sweeping changes across the organization, and the fallout might not be worth it. Instead, start with the things taking time, effort and money to maintain. And continue to keep it as simple and streamlined as possible._\n\n**Have everyone** on the team complete a self-evaluation so there are no surprises.\n\n## Do a self-evaluation \n\nHere are key questions to ask:\n\n- What’s the timeline? Discuss with all involved parties – existing team members and a representative of the new DevOps platform – how much time to allot for a completed migration. Migrations can take anywhere from 2 weeks for the initial migration to 3-6 months for monitoring. \n\n- What are the costs? This kind of platform adoption can ultimately save a LOT of money. However, the adoption of a new DevOps platform and the associated migration will no doubt have costs. Consider all costs and make sure they align with budgetary goals and requirements.\n\n- What about assistance? Are other parts of the company prepared to support a migration? How much of this will require work from the existing team and how much support will the DevOps platform provider offer? \n\n- Who are the primary and other platform users? What teams of people will migrate to this new platform? Will everyone have the same level or different levels of permissions? What needs to be done so that these teams are prepared to learn and teach the ins and outs of the new platform to other team members? \n\n- What data is migrating? Make sure to have a 360 view of the data involved in a migration including, projects, issues, and file types. What changes can happen with data when moving to a brand new DevOps platform? When evaluating the projects planned for migration, explore which applications teams spend the most time and energy working with, and what will set them up for success in the new platform.\n\n- How will automation fit in? 
Ensure teams understand the technology underpinnings of automation, like Kubernetes, CI/CD and more.\nHow should it be customized? Not every tool on a DevOps platform will be right for every team, and some tools might be a better fit at a later date. It makes sense to address any technology “outliers” right from the start. \n\n- Should the process be documented? Every step of the migration process should be documented and shared across teams. This level of transparency and an iterative, easy-to-search knowledge base can help problem-solve and refer back to stages already completed. Much like a single source for DevOps, a single source of truth for DevOps migration info helps everyone involved. \n\n- What about security? Security is never a “one and done,” but this is a good time to consider processes and levels of protection.\nWhat are good results?: What will a successful migration look like – when data is moved, or when teams are comfortable in their knowledge and use of the new system? 
Map out what the goals that will be critical to a successful migration.\n\nCheck out our _[Migrating to a DevOps platform](https://page.gitlab.com/migrate-to-devops-guide.html)_ eBook  for even more useful information about how to complete a successful DevOps platform migration.\n",[9,681,875],{"slug":897,"featured":6,"template":686},"a-3-step-plan-for-devops-platform-migration","content:en-us:blog:a-3-step-plan-for-devops-platform-migration.yml","A 3 Step Plan For Devops Platform Migration","en-us/blog/a-3-step-plan-for-devops-platform-migration.yml","en-us/blog/a-3-step-plan-for-devops-platform-migration",{"_path":903,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":904,"content":910,"config":917,"_id":919,"_type":14,"title":920,"_source":16,"_file":921,"_stem":922,"_extension":19},"/en-us/blog/a-look-ahead-for-gitlab-cicd",{"title":905,"description":906,"ogTitle":905,"ogDescription":906,"noIndex":6,"ogImage":907,"ogUrl":908,"ogSiteName":670,"ogType":671,"canonicalUrls":908,"schema":909},"New up and coming GitLab CI/CD Features","DAG, Multi-project Pipelines, Runner Setup for Kubernetes and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666889/Blog/Hero%20Images/photo-cicd12xlookahead.jpg","https://about.gitlab.com/blog/a-look-ahead-for-gitlab-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New up and coming GitLab CI/CD Features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-08-07\",\n      }",{"title":905,"description":906,"authors":911,"heroImage":907,"date":913,"body":914,"category":791,"tags":915},[912],"Jason Yavorska","2019-08-07","\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. 
For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nHey everyone, [Jason Yavorska](https://gitlab.com/jyavorska) here – product manager for CI/CD at GitLab. Back in June we\nreached the mid-point of the year and we're heading into our big 12.0 release, so I took the opportunity to\nsummarize some of the [highlights of our 11.x series of releases](/blog/look-back-on-11-11-cicd/).\nHopefully you had a chance to read it, if not, please take a moment to scan through and I bet you'll find an\ninteresting feature or two that can help improve your pipelines.\n\nWe're a couple of releases into the 12.x cycle now and I couldn't wait to share some\nof the things that we're looking forward to delivering the remainder of this year. Some of the features I am most excited about include DAG, a directed acyclic graph that makes it easy to run pipeline steps out of order, expanding our pipelines for merge requests/results feature to also work with forks, as well as making multi-project pipelines a Core feature. With about 3.44M job instances per week/13.76M per month, GitLab CI is growing at a rapid rate to help our customers and users with their deployment needs. Read on below to learn more about all of the exciting CI/CD features in the 12.0 series of releases that will help you to deploy your code quickly.\n\n## What's recent\n\nIn 12.0, we released [visual reviews](https://docs.gitlab.com/ee/ci/review_apps/index.html#visual-reviews),\nwhich allows users to provide issue feedback directly from the review apps that\nyour pipelines create. This makes it easy for all your team members to provide accurate\nfeedback on the changes you're making. 
We also added [collapsible job logs](https://docs.gitlab.com/ee/ci/pipelines/index.html#expand-and-collapse-job-log-sections),\nmaking output of pipelines easier to use, and enabled [multiple extends](https://docs.gitlab.com/ee/ci/yaml/#extends)\nfor pipeline jobs to make templatizing behaviors in your configuration even easier.\n\n![Visual Review Apps](https://about.gitlab.com/images/12_0/visual-review-apps.png \"Visual Review Apps\"){: .shadow.medium.center}\n\n[Visual Review Apps](https://docs.gitlab.com/ee/ci/review_apps/index.html#visual-reviews) were released in GitLab 12.0\n{: .note .text-center}\n\nIn 12.1, we delivered [parallel execution for merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html),\nexpanding on our [pipelines for merged results](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html)\nto make it very easy to automatically build and test a series of merge requests heading\ninto the same target branch in a fast, safe, and efficient way. For GitLab Pages we also\nadded [automatic HTTPS certificate renewal](https://docs.gitlab.com/ee/user/project/pages/custom_domains_ssl_tls_certification/lets_encrypt_integration.html),\nand completely refactored the GitLab Runner to be able to be [extensible for custom behaviors](http://docs.gitlab.com/runner/executors/custom.html),\nenabling many new kinds of operation modes for your runners including but not limited to\nsupporting any kind of proprietary virtualization environment.\n\n## What's next\n\nNow that you're up to speed with the first couple of 12.x releases, let's look ahead to what's coming next in each monthly release from 12.2 this month to 12.6 in December.\n\n## 12.2 (August 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. 
For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\n12.2 is just around the corner and it's also looking to be a big one.\n\nOne really exciting feature for this release is that we're adding a hybrid directed acyclic graph (DAG) to GitLab CI.\nThis is really just a fancy way of saying you'll be able to run pipeline steps out of order, breaking the\nstage sequencing you're familiar with in GitLab, and allowing jobs to relate to each other directly. This can\nbe valuable for monorepo situations where you have different folders in your repo that can build, test, and maybe\neven deploy independently, or in general it can provide a nice speed boost for your pipeline steps that relate to\neach other (for example, things like artifact processing or sequential test runs.) Read more in our [public issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/47063)\nabout how this great feature is going to work.\n\n![Directed Acyclic Graph](https://about.gitlab.com/images/blogimages/dag_execution.png \"Directed Acyclic Graph\"){: .shadow.medium.center}\n\nOut of order execution using the [Directed Acyclic Graph](https://gitlab.com/gitlab-org/gitlab-ce/issues/47063)\n{: .note .text-center}\n\nIn addition to the DAG, we're rethinking the way that [rules can be set up for pipelines](https://gitlab.com/gitlab-org/gitlab-ce/issues/60085),\nmaking it much easier to understand what a job is going to do compared with trying to figure out how a collection\nof `only/except` rules interact with each other. Another highlight is that we're adding the ability to\n[control behavior for individual users with Feature Flags](https://gitlab.com/gitlab-org/gitlab-ee/issues/11459) along with\n[percentage rollout across all users](https://gitlab.com/gitlab-org/gitlab-ee/issues/8240). 
These will give you a lot of\nflexibility to [progressively control](/direction/ops/#progressive-delivery) how changes are rolled out to your users\neven when the code is already in production.\n\n## 12.3 (September 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nThe individual change in the 12.3 release that I'm most excited about has got to be\n[associating a milestone with a release](https://gitlab.com/gitlab-org/gitlab-ce/issues/62402). One of the greatest\nstrengths of GitLab is the connected ecosystem of features – by tying a release to a milestone, it becomes\npossible to connect all kinds of interesting data in GitLab to the release – issues, merge requests, and more, all\nat your fingertips and curated automatically by GitLab.\n\nWe're also going to be making [runner setup for Kubernetes](https://gitlab.com/gitlab-org/gitlab-ce/issues/63768)\nrequire just a single click to get going, and making a key architectural change to GitLab Pages that will\n[bring initial availability time for pages site down to nearly instantaneous](https://gitlab.com/gitlab-org/gitlab-ce/issues/61929).\n\n## 12.4 (October 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. 
For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nFirst up, we're planning on adding a [Hashicorp Vault integration](https://gitlab.com/gitlab-org/gitlab-ce/issues/61053) that will let you tie your\nGitLab CI pipelines to your Vault instance, making it possible to keep crucial build and deployment secrets outside\nof GitLab entirely.\n\nWe're also [expanding our pipelines for merge requests/results feature to also work with forks](https://gitlab.com/gitlab-org/gitlab-ee/issues/11934),\nand (building on top of the newly associated milestone) delivering an MVC for fully automated [evidence collection for releases](https://gitlab.com/gitlab-org/gitlab-ce/issues/56030).\nThis means that things like test results, pipeline outputs, merge requests, and issues will have a snapshot\navailable for auditing and review in the context of a release, all collected automatically from throughout GitLab\nwithout having to write a line of code.\n\n## 12.5 (November 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nFor 12.5, we plan to tackle Helm v3 charts by providing features in our container registry to\nmanage these. [Helm v3](https://helm.sh/blog/helm-3-preview-pt1/) changes a lot about how charts work, and\nwe want to ensure that GitLab is there with you as you start to adopt this very different, but powerful new way\nof working.\n\nWe also plan to revisit [how workspaces are defined and shared](https://gitlab.com/gitlab-org/gitlab-ce/issues/62802),\nmaking it easier to build up a common staging area that can be shared by different jobs/pipelines in an easier-to-use,\nmore natural way than by using the cache or artifacts in GitLab today. 
Last but not least, we're improving on\nour testing parallelization features by making it possible to [leave the parallelization tuning to GitLab itself](https://gitlab.com/gitlab-org/gitlab-ee/issues/12282).\n\n## 12.6 (December 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nFor the holidays we're planning on [making multi-project pipelines a Core feature](https://gitlab.com/gitlab-org/gitlab-ce/issues/63497),\nbringing this powerful capability to all of our users. More and more we're hearing that teams are using multi-project\npipelines in all kinds of interesting ways to solve unique problems, and we want to make this feature available to\neveryone who can benefit. EDIT 2020-01-02: We resolved [this issue](https://gitlab.com/gitlab-org/gitlab/issues/31573) back in 12.4 where the trigger keyword was not working in certain cases, which satisfied the request of the folks in that issue to open source the feature. There are potential executive dashboards for cross-project pipelines in the future which will be paid features, but using triggering is in core and working fine. If there are any use cases that are not working for you, please ping me (@jyavorska) in [gitlab#29626](https://gitlab.com/gitlab-org/gitlab/issues/29626) and I'd be happy to take a look.\n\nWe are also bringing in a whole new way of working with GitLab CI/CD: [child/parent pipelines](https://gitlab.com/gitlab-org/gitlab-ce/issues/22972).\nUsing these you'll be able to trigger downstream pipelines from your main pipeline; these will run completely independently\nand in their own separate namespace from the main pipeline, but will provide status attribution back to the main pipeline. 
These\nchild pipelines are definable in YAML files anywhere in your repo, so if you have a monorepo (for example) you'll be able to organize\nthese independent pipelines separately but still orchestrate them from a central command and control module.\n\nFinally, we're looking to improve how we show the [change in pipeline duration over time](https://gitlab.com/gitlab-org/gitlab-ee/issues/1806)\nas well as how [test runs are changing over time](https://gitlab.com/gitlab-org/gitlab-ee/issues/1020). This trend data will make\nit easier to manage the performance of your pipelines on an ongoing basis.\n\n## In conclusion\n\nHopefully you're as excited about these features as much as we are. We'd love for you to participate\nin the public issues so we can work together to deliver these features with your input. It's\npossible some specific items may change, but overall\nthis is the direction we're headed as we continue to add iterative improvements across all of CI/CD in\nevery release.\n\nInterested in learning more about GitLab CI/CD in general, and seeing all the rest of\nthe items we plan to deliver? 
Visit our [CI/CD strategy page](/direction/ops/)\nfor our themes, priorities, and more details on what's coming next.\n\nPhoto by [Reginar](https://unsplash.com/photos/4fQAMZNaGUo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/arrow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,109,916],"features",{"slug":918,"featured":6,"template":686},"a-look-ahead-for-gitlab-cicd","content:en-us:blog:a-look-ahead-for-gitlab-cicd.yml","A Look Ahead For Gitlab Cicd","en-us/blog/a-look-ahead-for-gitlab-cicd.yml","en-us/blog/a-look-ahead-for-gitlab-cicd",{"_path":924,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":925,"content":931,"config":937,"_id":939,"_type":14,"title":940,"_source":16,"_file":941,"_stem":942,"_extension":19},"/en-us/blog/a-look-at-devops-salaries",{"title":926,"description":927,"ogTitle":926,"ogDescription":927,"noIndex":6,"ogImage":928,"ogUrl":929,"ogSiteName":670,"ogType":671,"canonicalUrls":929,"schema":930},"DevOps salaries in 2021: where do you rank?","Another surprise benefit of working on a DevOps platform? A higher salary! 
Here's why DevOps salaries are going up, and where to find the biggest paychecks.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/a-look-at-devops-salaries","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps salaries in 2021: where do you rank?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2021-10-07\",\n      }",{"title":926,"description":927,"authors":932,"heroImage":928,"date":933,"body":934,"category":769,"tags":935},[810],"2021-10-07","\n_This is the first in an occasional series of blog posts looking at DevOps salaries and careers._\n\nDespite the COVID-19 pandemic and the subsequent economic crisis that has disrupted lives and business across industries and around the world, demand for DevOps professionals remains strong and salaries continue to increase.\n\nThe IT industry, in general, fared better than many during the economic uncertainty of 2020 and 2021. With a strong IT infrastructure already in place and IT professionals accustomed to working remotely, increased demand and short supply for IT workers meant [IT salaries held steady or rose](https://rlc.randstadusa.com/for-business/learning-center/salary-insights/salary-guide/IT-technologies) in turbulent times, reported Randstad, a multinational human resources consulting firm. \n\nAnd DevOps professionals did even better than most in IT.\n\n## DevOps salaries are on the rise\n\n[DevOps, simply put,](/topics/devops/) is one of the hottest areas in the technology industry. Robert Half International Inc., a major human resources consulting firm, lists DevOps in the top 10 most in-demand jobs in 2021. 
Actually, in early September the firm [listed DevOps as the second hottest IT job](https://www.roberthalf.com/blog/salaries-and-skills/the-13-highest-paying-it-jobs-in-2019), just behind big data engineers, and surpassing cloud architects, security managers and database managers. And Randstad also ranked DevOps high in its [list of in-demand technology roles.](\u003Chttps://rlc.randstadusa.com/for-business/learning-center/salary-insights/salary-guide/IT-technologies>)\n\nAccording to salary watchers like Randstad, Glassdoor and ZipRecruiter, DevOps engineers, for instance, generally make approximately $100,000 to $150,000. Based on average U.S. salaries on Glassdoor, DevOps engineers are number 8 for 10 top-paying IT jobs in 2021. And DevOps developers, who Randstad calls one of the most in-demand technology roles, are doing well, too. They are in line to make $112,785 (for those with one year of experience) to $165,980 (for 10 years or more of experience). Those figures, of course, greatly depend on location and skill level.\n\nJust to drive the point home, Amanda Stansell, a data scientist at Glassdoor, said in a report earlier this year that [DevOps engineers](https://about.gitlab.com/topics/devops/what-is-a-devops-engineer/) made her list of [Top 10 Best Jobs in America for 2021.](\u003Chttps://www.glassdoor.com/research/best-jobs-in-america-for-2021/>) She bases her calculations on earning potential, overall job satisfaction, and number of job openings listed on Glassdoor. The role of DevOps engineer came in squarely in the middle at #5 - between Java developer at #1 and dentist at #10.\n\n## Demand for DevOps professionals is skyrocketing\n\nAccording to the Randstad 2021 Salary Guide, demand for DevOps developers is currently skyrocketing. “For employers… the average time-to-fill for these roles is north of 50 days,” the report noted. “That likely means many would-be employers today are instead suffering from key vacancies in their IT departments. 
Worse, with average annual salaries for DevOps developers at $137,830 — higher even than the average for cloud engineers — organizations should expect to spend considerable budget just to be in the running for skilled and experienced developers. It’s a pay-to-play hiring environment.”\n\n## Breaking down the DevOps salaries\n\nLet’s take a closer look at how pay for some DevOps positions breaks down:\n\n* ZipRecruiter reports that a DevOps engineer in San Francisco can take home $132,934, while the same position in Boston, Mass. would garner $113,552. In Austin, Texas, that engineer could earn $110,240 but in Boise, Idaho that drops to $102,093.\n\n* According to ZipRecruiter, the top five [highest paying cities for DevOps engineers](https://www.ziprecruiter.com/Salaries/Devops-Engineer-Salary) are Sunnyvale, Calif. (at $144,494); Santa Rosa, Calif. ($139,673); Cambridge, Mass. ($135,440); Vacaville, Calif. ($132,838), and New York City ($131,356).\n\n* The [top five best states](https://www.ziprecruiter.com/Salaries/What-Is-the-Average-Devops-Engineer-Salary-by-State) for DevOps engineers to earn the most are Massachusetts, Hawaii, Connecticut, Tennessee and Minnesota. \n\n## Read more on DevOps careers: \t\t\n\n- [Best advice for your DevOps career? Keep on learning](/blog/best-advice-for-your-devops-career-keep-on-learning/)\n\n- [6 tips to make software developer hiring easier](/blog/6-tips-to-make-software-developer-hiring-easier/)\n\n- [Four tips to increase your DevOps salary](/blog/four-tips-to-increase-your-devops-salary/)\n\n- [Have DevOps jobs to fill? 
Try these 3 strategies to hire and retain](/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain/)\n",[9,813,936],"startups",{"slug":938,"featured":6,"template":686},"a-look-at-devops-salaries","content:en-us:blog:a-look-at-devops-salaries.yml","A Look At Devops Salaries","en-us/blog/a-look-at-devops-salaries.yml","en-us/blog/a-look-at-devops-salaries",{"_path":944,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":945,"content":951,"config":956,"_id":958,"_type":14,"title":959,"_source":16,"_file":960,"_stem":961,"_extension":19},"/en-us/blog/a-snapshot-of-modern-devops-practices-today",{"title":946,"description":947,"ogTitle":946,"ogDescription":947,"noIndex":6,"ogImage":948,"ogUrl":949,"ogSiteName":670,"ogType":671,"canonicalUrls":949,"schema":950},"A snapshot of modern DevOps practices today","We consulted three market research firms for their take on DevOps today and in the future. Here's what they said about modern DevOps practices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668495/Blog/Hero%20Images/how-you-can-help-shape-the-future-of-securing-applications-at-gitlab.jpg","https://about.gitlab.com/blog/a-snapshot-of-modern-devops-practices-today","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A snapshot of modern DevOps practices today\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-10-31\",\n      }",{"title":946,"description":947,"authors":952,"heroImage":948,"date":953,"body":954,"category":769,"tags":955},[851],"2022-10-31","At almost 15 years old, DevOps has been around long enough to settle in and take shape at organizations around the world. But what do “modern” DevOps practices look like today, and how are they likely to change? Three market research firms gave us their take on the current generation of DevOps, and what’s coming next. 
\n\n## BizDevOps anyone?\n\nIf there’s one clear sign of DevOps maturity, it’s the fact that the business side has seamlessly inserted itself into what was forever a bastion of technologies and tech-driven practices. With some of the [bigger DevOps battles well in hand](/developer-survey/) (broader adoption of automation, more frequent deployments, and increased software testing), teams are able to bring in new metrics, including user experience, customer satisfaction, and other business drivers. 451 Research found business objectives and outcomes are the leading priority (51%) for enterprises as they refine, improve, and expand their DevOps implementations. In fact, 451 said business metrics are now almost as important a measure of DevOps success as technical achievements like application performance and quality.(1)\n\nFurther underscoring the way modern DevOps practices have broadened their focus is the increasing interest in value stream management, which looks at the software development lifecycle from idea generation to customer delivery and satisfaction level. Research firm IDC, in its \"Accelerated App Delivery Survey 2021\" (U.S. Results)(3), published January 2022, said value stream management is going to be one of the top investment priorities for DevOps teams this year. \n\n## DevOps and security\n\nBut the focus on business objectives doesn’t mean that work on the tech side of DevOps is done; in fact, DevSecOps and security in DevOps in general continue to be a tricky balance for many teams. IDC, also in its \"Accelerated App Delivery Survey 2021\" (referenced above), notes the cognitive dissonance of DevOps teams saying security is a top priority and feeling confident about their security posture while at the same time acknowledging DevSecOps is only in use for 25% or less of application development on average. 
\n\nForrester Research, in its \"State of Application Security, 2022\" (May 9, 2022), said: “Savvy security professionals know that to drive application security tool adoption, they must engage developers in the technology decision-making process. With both tooling\ndecision-making and budget moving to development, security pros must redefine\ntheir role in application security and take advantage of the opportunity to become\nmore strategic.”\n\n## The role of DevOps platforms\n\nSecurity is an ongoing substantive issue on DevOps teams but there are also a number of smaller, but still significant, problems teams need to solve, including [toolchain debt](/blog/battling-toolchain-technical-debt/), the challenge of scaling, and the need for a product and platform structure. DevOps platforms can help with all of those challenges.\n\nFor starters, it’s nearly impossible to scale DevOps throughout an enterprise without a DevOps platform supporting the effort. A platform provides a single source of truth for all teams, eliminates handoffs, and allows visibility into every stage of the process. A DevOps platform also helps eliminate the inefficiencies caused by too many tools and toolchains. Our [2022 Global DevSecOps Survey](/developer-survey/) found 69% of teams want to streamline their toolchains to reduce time spent on maintenance/integration and improve developer quality of life.\n\nWhat does a DevOps platform look like in 2022? Forrester Research, in \"The Forrester Guide to DevOps 2022\" (September 14, 2022) said modern DevOps platforms are “integrated and automated; create a software automation abstraction layer; use SLAs to drive continuous improvement; and have cloud platforms as deployment targets of choice.”\n\n## Culture (still) matters\n\nIn the early days of DevOps the talk was **all** about the culture challenge of bringing the vastly different dev and ops teams together. 
Somewhat surprisingly, market research firms are still talking about culture today, perhaps because the definition of DevOps has expanded to include more than just dev, ops, and even sec: BizDevSecUXTestPlatformLowCodeOps... ad infinitum, apparently.\n\nOrganizations wanting DevOps success must continue to push the importance of culture, collaboration, and communication, IDC reported in its \"Accelerated App Delivery Survey 2021.\" Forrester Research offered a stark assessment in \"The State of DevOps, 2022\" (June 27, 2022): “Never underestimate the importance of cultural transformation. Laggard organizations punish the bearers of bad tidings and don’t understand failure as a learning opportunity. Exorcizing these toxic attitudes is far easier said than done.”\n\n## Modern DevOps means modern technologies\n\nModern DevOps teams continue to incorporate new technologies into their practices. Two standouts: [AI/ML](/blog/why-ai-in-devops-is-here-to-stay/) and [GitOps](/blog/the-ultimate-guide-to-gitops-with-gitlab/). 451 points to rising interest in AIOps specifically to address the “too much information” problem with logs and metrics.(2) \n\n## Looking forward\n\nChange is of course a given and it’s safe to say that DevOps teams will face new organizational structures, new teammates, and complicated technology adoption challenges.\n\n### Cross-functional teams organized around products\n\nAfter years of bringing dev and ops together, some believe it’s time to reach out further. 
Forrester, in \"The Future of DevOps\" (June 8, 2022), said: “In the future, cross-functional teams, from business stakeholders to operational site reliability engineers (SREs), will organize around products, delivering business value via DevOps platforms.”\n\n### Wider and deeper platforms\n\nAnd those DevOps platforms “will consolidate, extend and deepen,” Forrester predicts in \"The Future of DevOps,\" cited above.\n\n### Introducing new teammates\n\nRoughly 66% of our 2022 DevSecOps Survey respondents told us their DevOps practices include a low code/no code tool. And that’s going to spread to all teams in the coming years. “Citizen development is a logical evolution of how enterprises deliver apps and enable digital business,” Forrester Research said in \"The Future of DevOps.\"\n\n### DevOps on the edge\t\n\nWith the Internet of Things and 5G becoming larger on the horizon, it’s not much of a stretch to predict modern DevOps teams will need to be able to support products with data literally [“on the edge.”](https://www.techtarget.com/searchdatacenter/definition/edge-computing) \n\n- [1] 451 Research, a part of S&P Global Market Intelligence, Mature DevOps Means Business, Jay Lyman, Senior Research Analyst, June 2022\n- [2] 451 Research, a part of S&P Global Market Intelligence, Business Objectives and Benefits Become Top Priority - Highlights from VotE DevOps, Jay Lyman, Senior Research Analyst, April 2022\n- [3] IDC, U.S. 
Accelerated Application Delivery Survey, Doc #US47924622, Jan 2022\n",[681,749,9],{"slug":957,"featured":6,"template":686},"a-snapshot-of-modern-devops-practices-today","content:en-us:blog:a-snapshot-of-modern-devops-practices-today.yml","A Snapshot Of Modern Devops Practices Today","en-us/blog/a-snapshot-of-modern-devops-practices-today.yml","en-us/blog/a-snapshot-of-modern-devops-practices-today",{"_path":963,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":964,"content":970,"config":979,"_id":981,"_type":14,"title":982,"_source":16,"_file":983,"_stem":984,"_extension":19},"/en-us/blog/a-visual-guide-to-gitlab-ci-caching",{"title":965,"description":966,"ogTitle":965,"ogDescription":966,"noIndex":6,"ogImage":967,"ogUrl":968,"ogSiteName":670,"ogType":671,"canonicalUrls":968,"schema":969},"A visual guide to GitLab CI/CD caching","Learn cache types, as well as when and how to use them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682443/Blog/Hero%20Images/cover.jpg","https://about.gitlab.com/blog/a-visual-guide-to-gitlab-ci-caching","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A visual guide to GitLab CI/CD caching\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matthieu Fronton\"}],\n        \"datePublished\": \"2022-09-12\",\n      }",{"title":965,"description":966,"authors":971,"heroImage":967,"date":973,"body":974,"category":791,"tags":975},[972],"Matthieu Fronton","2022-09-12","\n\nIf you've ever worked with GitLab CI/CD you may have needed, at some point, to use a cache to share content between jobs. The decentralized nature of GitLab CI/CD is a strength that can confuse the understanding of even the best of us when we want to connect wires all together. 
For instance, we need to know critical information such as the difference between artifacts and cache and where/how to place setups.\n\nThis visual guide will help with both challenges.\n\n## Cache vs. artifacts\n\nThe concepts _may_ seem to overlap because they are about sharing content between jobs, but they actually are fundamentally different:\n\n- If your job does not rely on the previous one (i.e. can produce it by itself but if content already exists the job will run faster), then use cache.\n- If your job does rely on the output of the previous one (i.e. cannot produce it by itself), then use artifacts and dependencies.\n\nHere is a simple sentence to remember if you struggle between choosing cache or artifact:\n> Cache is here to speed up your job but it may not exist, so don't rely on it.\n\nThis article will focus on **cache**.\n\n## Initial setup\n\nWe'll go with a simple representation of the GitLab CI/CD pipelining model and ignore (for now) that the jobs can be executed on any runners and hosts. 
It will help get the basics.\n\nLet's say you have:\n- 1 project with 3 branches\n- 1 host running 2 docker runners\n\n![Initial setup](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-1.png){: .shadow.center}\n\n## Local cache: Docker volume\n\nIf you want a [local cache](https://docs.gitlab.com/ee/ci/caching/index.html#where-the-caches-are-stored) between all your jobs running on the same runner, use the [cache statement](https://docs.gitlab.com/ee/ci/yaml/#cache) in your `.gitlab-ci.yml`:\n\n```yaml\ndefault:\n  cache:\n    paths:\n      - relative/path/to/folder/*.ext\n      - relative/path/to/another_folder/\n      - relative/path/to/file\n```\n\n![local / container / all branches / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-2.png){: .shadow.center}\n\nUsing the [predefined variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) `CI_COMMIT_REF_NAME` as the [cache key](https://docs.gitlab.com/ee/ci/yaml/index.html#cachekey), you can ensure the cache is tied to a specific branch:\n\n```yaml\ndefault:\n  cache:\n    key: $CI_COMMIT_REF_NAME\n    paths:\n      - relative/path/to/folder/*.ext\n      - relative/path/to/another_folder/\n      - relative/path/to/file\n```\n\n![local / container / one branch / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-3.png){: .shadow.center}\n\nUsing the [predefined variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) `CI_JOB_NAME` as the [cache key](https://docs.gitlab.com/ee/ci/yaml/index.html#cachekey), you can ensure the cache is tied to a specific job:\n\n![local / container / all branches / one job](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-4.png){: .shadow.center}\n\n## Local cache: Bind mount\n\nIf you don't want to use a volume for caching purposes (debugging purpose, cleanup disk space more easily, etc.), you can configure a [bind mount for Docker 
volumes](https://docs.docker.com/storage/bind-mounts/) while registering the runner. With this setup, you do not need to set up the [cache statement](https://docs.gitlab.com/ee/ci/yaml/#cache) in your `.gitlab-ci.yml`:\n\n```bash\n#!/bin/bash\n\ngitlab-runner register                             \\\n  --name=\"Bind-Mount Runner\"                       \\\n  --docker-volumes=\"/host/path:/container/path:rw\" \\\n...\n```\n\n![local / one runner / one host / all branches / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-5.png){: .shadow.center}\n\nIn fact, this setup even allows you to share a cache between jobs running on the same host without requiring you to set up a distributed cache (which we'll talk about later):\n\n```bash\n#!/bin/bash\n\ngitlab-runner register                             \\\n  --name=\"Bind-Mount Runner X\"                     \\\n  --docker-volumes=\"/host/path:/container/path:rw\" \\\n...\n\ngitlab-runner register                                 \\\n  --name=\"Bind-Mount Runner Y\"                         \\\n  --docker-volumes=\"/host/path:/container/alt/path:rw\" \\\n...\n```\n\n![local / multiple runners / one host / all branches / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-6.png){: .shadow.center}\n\n## Distributed cache\n\nIf you want to have a [shared cache](https://docs.gitlab.com/runner/configuration/autoscale.html#distributed-runners-caching) between all your jobs running on multiple runners and hosts, use the \u003Ca href=\"https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runnerscache-section\">[runners.cache]\u003C/a> section in your `config.toml`:\n\n```toml\n[[runners]]\n  name = \"Distributed-Cache Runner\"\n...\n  [runners.cache]\n    Type = \"s3\"\n    Path = \"bucket/path/prefix\"\n    Shared = true\n    [runners.cache.s3]\n      ServerAddress = \"s3.amazonaws.com\"\n      AccessKey = \"\u003Cchangeme>\"\n      SecretKey = 
\"\u003Cchangeme>\"\n      BucketName = \"foobar\"\n      BucketLocation = \"us-east-1\"\n```\n\n![remote / multiple runners / multiple hosts / all branches / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-7.png){: .shadow.center}\n\nUsing the predefined variable `CI_COMMIT_REF_NAME` as the cache key you can ensure the cache is tied to a specific branch between multiple runners and hosts:\n\n![remote / multiple runners / multiple hosts / one branch / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-8.png){: .shadow.center}\n\n## Real-life setup\n\nThe above assumptions allowed you to harness your understanding of the concepts and possibilities.\n\nIn real life, you'll face more complex wiring and we hope this article will help you as a visual cheatsheet along with the reference documentation.\n\nJust to give you a sneak peek, here is an exercise for you:\n\n- Set up a cache between all the jobs of a specific stage, running on any runner and any hosts, but only between pipelines of the same branch:\n\n![Real-life test assignment](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-9.png){: .shadow.center}\n\nHappy caching, folks!\n\n\n\nCover image by [Alina Grubnyak](https://unsplash.com/@alinnnaaaa) on [Unsplash](https://unsplash.com)\n{: .note}\n",[976,977,9,978],"CI","CD","tutorial",{"slug":980,"featured":6,"template":686},"a-visual-guide-to-gitlab-ci-caching","content:en-us:blog:a-visual-guide-to-gitlab-ci-caching.yml","A Visual Guide To Gitlab Ci 
Caching","en-us/blog/a-visual-guide-to-gitlab-ci-caching.yml","en-us/blog/a-visual-guide-to-gitlab-ci-caching",{"_path":986,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":987,"content":993,"config":1000,"_id":1002,"_type":14,"title":1003,"_source":16,"_file":1004,"_stem":1005,"_extension":19},"/en-us/blog/accelerate-state-of-devops-report-key-takeaways",{"title":988,"description":989,"ogTitle":988,"ogDescription":989,"noIndex":6,"ogImage":990,"ogUrl":991,"ogSiteName":670,"ogType":671,"canonicalUrls":991,"schema":992},"Software supply chain security practices seeing only modest adoption","DORA Accelerate State of DevOps report shows opportunity lies within better security practices, including a focus on culture.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663779/Blog/Hero%20Images/cicd-2018_blogimage.jpg","https://about.gitlab.com/blog/accelerate-state-of-devops-report-key-takeaways","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Software supply chain security practices seeing only modest adoption\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aathira Nair\"}],\n        \"datePublished\": \"2023-02-21\",\n      }",{"title":988,"description":989,"authors":994,"heroImage":990,"date":996,"body":997,"category":875,"tags":998},[995],"Aathira Nair","2023-02-21","\nIf you are wondering what area of DevSecOps has tremendous opportunity for impact, look no further than [security of your software supply chain](https://about.gitlab.com/blog/the-ultimate-guide-to-software-supply-chain-security/). \n\n\"Software supply chain security practices, embodied as the [SLSA](https://about.gitlab.com/blog/achieve-slsa-level-2-compliance-with-gitlab/) or [SSDF](https://about.gitlab.com/blog/comply-with-nist-secure-supply-chain-framework-with-gitlab/) frameworks, are already seeing modest adoption but are not seeing universal adoption yet. 
There is still a lot of room for improvement there,\" said Todd Kuleza, a member of Google Cloud's DevOps Research and Assessment (DORA) team and a senior user experience (UX) researcher at Google Cloud.\n\nKuleza, a co-author of the DORA team's [2022 State of DevOps Report](https://cloud.google.com/devops/state-of-devops/), recently joined GitLab for [a webcast](https://learn.gitlab.com/diy-devops/stateofdevops2022) to discuss software supply chain security adoption, including:\n- Why teams choose CI/CD and other modern development processes to improve their security posture\n- How automated security checks within integration and deployment help developers own security processes\n- How to establish team security practices to reduce developer burnout\n\n> [Listen to the full webcast](https://learn.gitlab.com/diy-devops/stateofdevops2022) to learn how to model your organization's security practices around the DevSecOps capabilities of high-performing teams.\n\nThe [DORA metrics](https://about.gitlab.com/blog/how-the-dora-metrics-can-help-devops-team-performance/) have become central to how we understand software delivery velocity and team performance. They have helped organizations transition to a data-driven approach for software delivery, inline with business goals.\n\n## Securing the software supply chain\n\nFrom our own [GitLab 2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/), we learned that more than 50% of developers are \"fully responsible\" for security in their organizations. 
Meanwhile, the DORA team found that the greatest predictor for security practices is cultural, not technical: \"High-trust, low-blame cultures focused on performance are more likely to have above average adoption of emerging security practices,\" according to their report.\n\nThe DORA report also states that organizations with low levels of security practices have 1.4x greater odds of having high levels of burnout than teams with high levels of security.\n\nAll told, this data demonstrates that security culture and technology together have to be a primary focus for DevSecOps teams going forward.\n\nLearn more about the DORA metrics:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/lM_FbVYuN8s\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[9,875,999],"webcast",{"slug":1001,"featured":6,"template":686},"accelerate-state-of-devops-report-key-takeaways","content:en-us:blog:accelerate-state-of-devops-report-key-takeaways.yml","Accelerate State Of Devops Report Key Takeaways","en-us/blog/accelerate-state-of-devops-report-key-takeaways.yml","en-us/blog/accelerate-state-of-devops-report-key-takeaways",{"_path":1007,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1008,"content":1014,"config":1020,"_id":1022,"_type":14,"title":1023,"_source":16,"_file":1024,"_stem":1025,"_extension":19},"/en-us/blog/achieve-devsecops-collaboration",{"title":1009,"description":1010,"ogTitle":1009,"ogDescription":1010,"noIndex":6,"ogImage":1011,"ogUrl":1012,"ogSiteName":670,"ogType":671,"canonicalUrls":1012,"schema":1013},"DevSecOps basics: 5 cross-functional team collaboration goals","Team work makes the (DevSecOps) dream work. 
Here's what you need to know about collaboration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663594/Blog/Hero%20Images/devsecops-cross-collaboration.jpg","https://about.gitlab.com/blog/achieve-devsecops-collaboration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps basics: 5 cross-functional team collaboration goals\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2020-07-01\",\n      }",{"title":1009,"description":1010,"authors":1015,"heroImage":1011,"date":1017,"body":1018,"category":679,"tags":1019},[1016],"Vanessa Wegner","2020-07-01","\n_This is the second in a five-part series on getting started with [DevSecOps](/solutions/security-compliance/). Part one gives you nine ways to [shift security left](/blog/efficient-devsecops-nine-tips-shift-left/). Part three offers concrete steps to add [automated security testing](/blog/devsecops-security-automation/) into the mix. And part four explains how to [build a strong security culture](/blog/security-culture-devsecops/) to support your DevSecOps efforts._\n\nCross-functional collaboration seems like a dry buzzword, but I promise you it’s way better than it sounds. After all, [DevOps](/topics/devops/) is cross-functional collaboration. DevSecOps is too. [In GitLab’s 2020 DevSecOps Survey](/developer-survey/), respondents had a plethora of strong reasons to do DevOps, including code quality, faster time to market, and _happier developers_.  But if there are rifts in communication and collaboration, any joint Dev, Sec, or Ops effort will all be for naught. \n\n[Collaboration](/blog/future-merge-requests-realtime-collab/) is a core principle of DevOps but it is even more critical when bringing a third element – security – into the mix. 
Team members should feel comfortable reaching out across functions, asking questions, and sharing (non-sensitive) information. DevSecOps brings a special meaning to collaboration because of the shift in roles and responsibilities introduced by new security efforts. [Shifting your security practices left](/blog/efficient-devsecops-nine-tips-shift-left/) will require some heavy lifting to truly get your DevSecOps practices off the ground.\n\n## Leading by example\n\nTo begin, leaders from each functional team need to gain a mutual understanding of the other teams’ functions, roadblocks, and goals. Then they should discuss how security will be integrated into dev and ops – both how the lifecycle will flow, and how employees will be onboarded to any new processes. The results of that discussion should be shared across the entire organization to put everyone on the same page. \n\nOrganizational heads will need to set an example for their teams. Employees should understand the collaborative work that is being done at the top, and how their own work is part of that effort. Additional expectations should also be communicated. These, as outlined below, should foster a collaborative environment that requires communication and reliability across teams. \n\n### Cross-functional team goals\n\nIt’s important to start with cross-functional team goals. These can be broad (like \"deliver a secure and stable product at every release\"), or specific (\"add extensive identity verification features while ensuring compliance with GDPR\"). Regardless of what the goal is, it should be made clear that employees across all functions are working together to achieve the same thing – and the cross-functional team will be evaluated as a whole. \n\n### Peer teaching and peer learning\n\nWhen security employees understand the function and goals of Dev and Ops, they’ll be able to give better guidance and instruction on how each role can produce secure work. 
On the other hand, when Dev and Ops understand the function and goals of security, they’ll find it more logical to incorporate new security practices into their day-to-day work. This way, employees will understand how their goals align with and benefit each other. Employees should be encouraged to help one another learn – and certainly should be encouraged to learn from each other with open minds. \n\n### Centralized information sharing\n\nFor the best possible [DevSecOps](/solutions/security-compliance/) experience, information needs to live and be shared in a central location – preferably [a single platform for the entire DevOps lifecycle](/stages-devops-lifecycle/). Ideally, the entire project team has access to all the information they need, all in the same place. This minimizes context-switching and reduces the likelihood of information getting lost or missed by team members. Keeping change logs, test and scan results, code reviews and other metrics colocated means everyone knows where to find the information they need to get their job done efficiently.  \n\n## DevSecOps: Five collaboration goals\n\nWhat does it look like to have strong collaboration across your teams? Qualitative principles are slightly harder to quantify than things like vulnerabilities, but there are plenty of ways to build your team's collaborative muscles and measure their strength:\n\n1. Project planning is a joint effort between Dev, Sec, and Ops. \n1. Employees have access and actively contribute to a single datastore with reporting and visibility across the DevSecOps lifecycle.\n1. Vulnerability management, reporting, and remediation will cost less and happen more quickly than before you began your DevSecOps efforts.\n1. Tools have been consolidated so that development and security can collaborate within the same interface. \n1. Project delays are rarely caused by lack of communication or information sharing. \n\n_How efficient are your DevSecOps practices? 
[Take our DevSecOps Maturity Assessment to find out.](https://about.gitlab.com/resources/devsecops-methodology-assessment/)_\n\n**Read more about DevSecOps:**\n\n[How CI can get you to DevSecOps faster](/blog/solve-devsecops-challenges-with-gitlab-ci-cd/)\n\n[Why security as code is important](/blog/how-to-security-as-code/)\n\n[How to integrate security into DevOps](/blog/how-to-security-as-code/)\n\nCover image by [Charlie Egan](https://unsplash.com/@charlieegan3) on [Unsplash](https://unsplash.com/photos/qOR762W7OvA)\n{: .note}\n",[855,749,9,875],{"slug":1021,"featured":6,"template":686},"achieve-devsecops-collaboration","content:en-us:blog:achieve-devsecops-collaboration.yml","Achieve Devsecops Collaboration","en-us/blog/achieve-devsecops-collaboration.yml","en-us/blog/achieve-devsecops-collaboration",{"_path":1027,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1028,"content":1034,"config":1042,"_id":1044,"_type":14,"title":1045,"_source":16,"_file":1046,"_stem":1047,"_extension":19},"/en-us/blog/adsoul-devops-transition-to-gitlab-ci",{"title":1029,"description":1030,"ogTitle":1029,"ogDescription":1030,"noIndex":6,"ogImage":1031,"ogUrl":1032,"ogSiteName":670,"ogType":671,"canonicalUrls":1032,"schema":1033},"How adSoul transitioned to GitLab CI from Jenkins","adSoul, a marketing automation company, outlines a successful three-phase migration plan for moving to GitLab CI from Jenkins.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678442/Blog/Hero%20Images/londoncommit.png","https://about.gitlab.com/blog/adsoul-devops-transition-to-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How adSoul transitioned to GitLab CI from Jenkins\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2019-11-05\",\n      
}",{"title":1029,"description":1030,"authors":1035,"heroImage":1031,"date":1037,"body":1038,"category":791,"tags":1039},[1036],"Brein Matturro","2019-11-05","\nadSoul is a Germany-based data-driven online marketing company that aims to improve search engine advertising and scalability for businesses. The core of adSoul relies heavily on API interfaces and entity recognition to post keywords on Google and Bing with marketing automation. \n\nAt GitLab Commit London, [Philipp Westphalen](https://www.linkedin.com/in/philipp-westphalen-a83318188/), fullstack developer at adSoul and GitLab Hero, shares how the company transitioned from Jenkins to GitLab CI. adSoul is a startup company with five developers, and as Philipp says “We literally have no time for everything we need to do.” They were looking for a tool that requires less time-consuming maintenance, and with Jenkins the team found it hard to read their existing files. “Our Jenkins was not so stable at all and it was tough to change because it was managed by our provider,” Philipp says. Cost and visibility were also huge motivators in moving away from [Jenkins to GitLab CI](/blog/docker-my-precious/).\n\n## GitLab migration in three phases\n\nPhase 1: Move the repository.\nThe [adSoul team](https://www.adsoul.com) used the GitHub Import by GitLab, but had setbacks with migrating their issues, so they created a GitHub open source issue migrator as a resolution. Following that, they modified scripts with the new origin by exchanging the GitHub API call with a GitLab API. “This was really easy and we had a stable build with our new repository, so we could move our product management to GitLab and not need GitHub anymore,” Philipp says.\n\nPhase 2: Migrate the CI/CD pipeline.\nThe team started to create a GitLab CI YAML and tried to do a simple ‘lift and shift,’ however their processes were more complicated than anticipated. 
Though this phase was time consuming, it became clear the team could move to phase three without hiccups. “Quick pro tip,” says Philipp. “If you’re running your own GitLab runners, increase the log limit if you have to debug your building step.” \n\nPhase 3: Improve the CI/CD pipeline.\nThe team thought about ways of building their software, so they split projects into steps. “Our idea was that one job does one thing perfectly. Each job is simple and everyone can modify it easily” Philipp says. They improved their build time by moving to Gradle, created parallel job processing, and by using standard Docker images for ease of management. \n\n## Takeaways from a successful migration\n\n1. Plan your migration. Get every member of the team involved and aware of the upcoming changes, including how tools are working together and what the expectations are moving forward. “Take your time for the migration,” Philipp says. “It’s not two days and then we are finished.” \n\n2. Go step by step. adSoul used a three phase plan which allowed the team to deploy a new version and still continue to work on existing projects. “We could improve our application without having to wait for a better infrastructure,” Philipp says.\n\n3. Rethink your [DevOps strategy](/blog/better-devops-with-gitlab-ci-cd/). In the time leading up to the migration, examine things like security automation and other important pieces in a DevOps overall strategy.\n\n4. Start with a small project. Work closely with colleagues to create small GitLab CI projects to familiarize everyone before creating larger, overwhelming projects.\n\nPro tip: Keep your pipeline user friendly. Create a good user experience for the team with clear job names, style your config for a better overview, and write comments for variables and hard to understand code. \n\n## Why GitLab works for a small team\n\n“The most important thing is that GitLab is a powerful CI/CD solution with high customization,” Philipp says. 
There is one home for all projects, without dependencies on one another. With Jenkins, even small exploratory changes can impact the larger job. “With GitLab, you don’t have dependency between branches. So, if you’re trying something new for your CI, you can do it simply in your branch and the master branch will not be affected by the changes,” Philipp says.\n\nThe CI is low maintenance, which is a useful timesaver for a smaller team. “The CI provides us with really low maintenance time. So, usually we don’t have to care about our CI for a month or more,” Philipp says.\n\nTo learn more about adSoul’s migration to GitLab, watch Philipp’s talk from GitLab Commit London.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/C5xfw0ydh2k\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[109,9,682,1040,936,1041],"performance","cloud native",{"slug":1043,"featured":6,"template":686},"adsoul-devops-transition-to-gitlab-ci","content:en-us:blog:adsoul-devops-transition-to-gitlab-ci.yml","Adsoul Devops Transition To Gitlab Ci","en-us/blog/adsoul-devops-transition-to-gitlab-ci.yml","en-us/blog/adsoul-devops-transition-to-gitlab-ci",{"_path":1049,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1050,"content":1056,"config":1061,"_id":1063,"_type":14,"title":1064,"_source":16,"_file":1065,"_stem":1066,"_extension":19},"/en-us/blog/advanced-devsecops-practices",{"title":1051,"description":1052,"ogTitle":1051,"ogDescription":1052,"noIndex":6,"ogImage":1053,"ogUrl":1054,"ogSiteName":670,"ogType":671,"canonicalUrls":1054,"schema":1055},"How advanced are your DevSecOps practices?","Read here what the three levels of DevSecOps practices are and what they include and how to improve your 
own","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678388/Blog/Hero%20Images/advanced-devsecops-practices.jpg","https://about.gitlab.com/blog/advanced-devsecops-practices","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How advanced are your DevSecOps practices?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-10-21\",\n      }",{"title":1051,"description":1052,"authors":1057,"heroImage":1053,"date":1058,"body":1059,"category":679,"tags":1060},[1016],"2019-10-21","\n[DevSecOps](/solutions/security-compliance/) doesn’t happen overnight – between team alignment, new\nresponsibilities, new processes, and automation, there is a lot that needs to\nhappen to reach an advanced state of DevSecOps. Then there's the question of\nwhat it means to be advanced. How do you know when you've reached a comfortably\nmature state? What defines a beginner or intermediate level of DevSecOps maturity?\n\n## Analysing your DevOps practices?\n\nI set out to find answers to these questions and\ndiscovered a mountain of different measures. So instead of asking you to take your\nown journey through DevSecOps self-discovery, I compiled some points of maturity\nand segmented them into three classes: Beginner, intermediate, and advanced. The folks at the [2018 Open Security Summit](https://2018.open-security-summit.org/outcomes/tracks/owasp-samm/working-sessions/devsecops-maturity-model/) agree that\nDevSecOps maturity is generally evaluated across six dimensions: Technology, processes, culture, tools, automation, and information flow.\n\n## DevOps Maturity: Beginner\n\nTeams in the early phases of DevSecOps adoption show clear attempts to change\nthe inertia of their organizations, but don't yet have all people and processes\non board. A security mindset and culture is beginning to take hold in these early-stage teams. 
Testing may be interspersed throughout the development lifecycle, but\nsome of those tests may run manually. The processes and\noperations used by early-stage teams often lack transparency and standardization. This lack of clarity makes it difficult for teams to\nreproduce certain activities and requires developers figure out solutions\nfrom scratch when taking on a new project.\n\n## DevOps Maturity: Intermediate\n\nMany teams at an intermediate level of DevSecOps maturity have accepted\nthat security is everyone's responsibility – and dev, sec, and ops teams are\nlearning how to collaborate efficiently on software development. The pipeline\nintegrates automated security checks at a few points throughout the development lifecycle and provides visibility\ninto the actions taking place. Incident response may still lag behind these\nnewer developments, with teams reacting to incidents rather than proactively\ndefending against them.\n\n## DevOps Maturity: Advanced\n\nA mature DevSecOps practice is highly efficient and collaborative.\nDevelopers accept ownership of their security responsibilities and run tests\nagainst their code at every commit to ensure security and compliance. Each\nteam has visibility into an integrated toolchain (or better yet, [a single tool](/stages-devops-lifecycle/)),\nand developers work quickly within a self-service, easy to use, and\ncentralized platform at every phase. Automation helps teams test and remediate,\nminimizes back and forth between teams, and brings security to the speed of\nthe business.\n\nAs a whole, advanced DevSecOps practices take a proactive approach to security.\nCompliance and expectations are defined and standardized across teams. Testing\nshould evolve to anticipate the most likely targets for attack. 
Automated\nmonitoring will continue security efforts after launch, and response plans (for\nthe sec, dev, and ops teams) should be established in case of a breach.\n\n## DevSecOps is for everyone\n\nEach step toward DevSecOps is a step in the right direction – and it is increasingly\nrisky to leave security as a bolt-on operation. Regardless of size\nor history, every company can and should adopt DevSecOps for\nsoftware development. Strategies may vary: Nimble startups can adjust and\nadapt quickly, while larger incumbent businesses might begin with a pilot project,\nor choose to retrofit new security practices to established products.\n\nPhoto by Stanislav Kondratiev on [Unsplash](https://unsplash.com/s/photos/growth?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[9,875],{"slug":1062,"featured":6,"template":686},"advanced-devsecops-practices","content:en-us:blog:advanced-devsecops-practices.yml","Advanced Devsecops Practices","en-us/blog/advanced-devsecops-practices.yml","en-us/blog/advanced-devsecops-practices",{"_path":1068,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1069,"content":1075,"config":1080,"_id":1082,"_type":14,"title":1083,"_source":16,"_file":1084,"_stem":1085,"_extension":19},"/en-us/blog/agile-best-practices",{"title":1070,"description":1071,"ogTitle":1070,"ogDescription":1071,"noIndex":6,"ogImage":1072,"ogUrl":1073,"ogSiteName":670,"ogType":671,"canonicalUrls":1073,"schema":1074},"5 Agile best practices","Make the most out of Agile development with these technical best practices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678597/Blog/Hero%20Images/run-agile-in-gitlab.jpg","https://about.gitlab.com/blog/agile-best-practices","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Agile best practices\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": 
\"2019-08-13\",\n      }",{"title":1070,"description":1071,"authors":1076,"heroImage":1072,"date":1077,"body":1078,"category":679,"tags":1079},[702],"2019-08-13","\n\n[Agile development](/solutions/agile-delivery/) can have\na transformative impact on teams and applications. These five best practices can\nhelp your team streamline and accelerate delivery.\n\n## 1. Continuous integration\n\n[Continuous integration](/solutions/continuous-integration/) works by pushing small code chunks\nto an application’s codebase hosted in a Git repository. Every push triggers a pipeline of scripts to build,\ntest, and validate code changes before merging them into the main branch. By\nbuilding and testing each change as early as possible – usually several times a\nday – teams can detect errors as quickly as possible, reduce integration problems,\nand avoid compounding problems, allowing teams to develop faster, with more confidence.\n\n## 2. Retrospectives\n\n[Retrospectives](/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives/) are conversations about what went well and what went wrong in a\nproject or iteration. One of the most important Agile qualities is continuous\nlearning, and retros provide a transparent way to discuss how various teams\nexperienced a sprint and voice any concerns or ideas.\n\n> “A successful team is a happy team. Bringing down cycle time can help a team be more\nsuccessful because they are shipping value more often, but your team might have more\nimportant things that must be addressed first. Using retrospectives will help you figure\nout what success means to your team, and what needs to be done to achieve\nthat success.” – [Rachel Nienaber](/company/team/#rnienaber), engineering manager, Geo\n\nTo generate the best results from a retrospective, there should be\n[a safe environment for feedback and discussion and a plan for advancing discussion\nfrom facts to\nconclusions](/handbook/engineering/management/group-retrospectives/).\n\n## 3. 
Pairing\n\nPairing sessions can help team members work through features both large and small,\ninspiring problem-solving and ideation. When pairing, one team member writes code\nwhile the other reviews each line. Pairing results in fewer bugs, increased innovation,\nand skills development. Team members can learn from each other and discover best\npractices. Team members can spontaneously pair or managers can set up a more\n[formal pairing session process](https://gitlab.com/gitlab-com/support/support-training/issues?label_name%5B%5D=pairing) 🍐\n\n## 4. Iterative development\n\nWhen teams iterate with small changes, they can\n[reduce cycle time](/blog/strategies-to-reduce-cycle-times/) and spark rapid feedback cycles.\nBy making the quickest changes possible to improve a user's outcome, teams can add\nuseful functionality with fewer bugs or usability issues since potential problems\nare spotted early. Other benefits of iterative development include faster time to\nmarket, reduced scope creep, and increased morale (i.e. team members can see their\nwork right away rather than wait several releases).\n\n## 5. Burndown charts\n\nIf your team uses a Scrum framework, consider using [burndown charts](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html) to monitor\nsprint progress. Teams can visualize the work scoped in the current sprint to\nunderstand what work has been completed, allowing them to react to risks quickly\nand adapt. This information can help business stakeholders understand that anticipated\nfeatures may be delayed until a future sprint.\n\nEmploying Agile best practices will have a significant positive impact on efficiently\ncreating customer-centric products.\n\nDo you have any best practices that have transformed your team’s development process? 
We’d love to hear them!\n\nCover image by [Mikael Kristenson](https://unsplash.com/@mikael_k?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/6GjHwABuci4)\n{: .note}\n",[855,749,9,683],{"slug":1081,"featured":6,"template":686},"agile-best-practices","content:en-us:blog:agile-best-practices.yml","Agile Best Practices","en-us/blog/agile-best-practices.yml","en-us/blog/agile-best-practices",{"_path":1087,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1088,"content":1094,"config":1099,"_id":1101,"_type":14,"title":1102,"_source":16,"_file":1103,"_stem":1104,"_extension":19},"/en-us/blog/agile-mindset",{"title":1089,"description":1090,"ogTitle":1089,"ogDescription":1090,"noIndex":6,"ogImage":1091,"ogUrl":1092,"ogSiteName":670,"ogType":671,"canonicalUrls":1092,"schema":1093},"What is an Agile mindset?","Learn how embracing change can help you speed up software delivery.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680634/Blog/Hero%20Images/agilemind.jpg","https://about.gitlab.com/blog/agile-mindset","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What is an Agile mindset?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-06-13\",\n      }",{"title":1089,"description":1090,"authors":1095,"heroImage":1091,"date":1096,"body":1097,"category":679,"tags":1098},[702],"2019-06-13","\n\n\nEnsuring [Agile](/solutions/agile-delivery/) teams use the most [effective strategies](/solutions/agile-delivery/) to reduce cycle time is a\npriority for IT leaders, but what good is a menagerie of techniques if a team’s\napproach to software development doesn’t spark innovation? 
When it comes to\nbuilding the foundation for accelerating delivery, IT leaders have been incorrectly\nplacing emphasis on collecting tools rather than developing an Agile mindset.\n\n> “The core of Agile is recognizing that we need to get to and maintain an Agile mindset. **If I have an organization with an Agile mindset, and really rock-solid product management, Agile processes and tools will evolve out of that. If you have the Agile mindset and an awesome connection with your customers and are solving their problems, things will evolve in the right way.** You won’t even realize you’re being Agile. It’s just good business.” — [Todd Little](https://www.forbes.com/sites/stevedenning/2016/06/07/the-key-missing-ingredient-in-the-agile-manifesto-mindset/#4fa5917467ff), CEO Lean Kanban\n\nThere are many definitions of an Agile mindset, but the general consensus is that it:\n\n* Views setbacks as learning opportunities\n* Embraces iteration, collaboration, and change\n* Focuses on delivering value\n\n## Agile mindset characteristics\n\nThere’s no definitive list of what makes up an Agile mindset, but with the\nintention of getting you started, here are a few of the most widely accepted\ncharacteristics. Based on your team’s dynamics, your organization’s culture, and\nyour goals, you may adopt other attributes to help your team accelerate delivery.\n\n### Setbacks are learning opportunities\n\nEmpower your team to experiment and be creative so that rather than view a setback\nas a failure, they’ll see it as an opportunity to learn and grow. 
When your team\nhas the freedom to be innovative – without fear – they’re more likely to solve\nproblems and add to the knowledge base of what works and what doesn’t.\nTaking risks shouldn’t be a rebellious endeavor — it should be your team’s norm.\n\n### Agile values and principles: Iteration, collaboration, and change\n\n**Iteration**: Instill the belief that there’s always room for improvement and\nthat anyone can propose a change or idea. At GitLab, we believe\n[everyone can contribute](/company/mission/#mission) and that [iteration is the fastest\nway to feedback](https://handbook.gitlab.com/handbook/values/#iteration), helping us course correct and\ncreate new features.\n\n**Collaboration**: Finding ways to improve and increase cross-collaboration\nenables frictionless handoffs, helps relieve the burden on teams, and facilitates\na culture of trust and communication. Whether you develop new workflows or use\ndifferent tools, keep an eye out for silos which can work against collaboration.\n\n**Change**: Agile methodology is founded on the ability to adapt to\nunpredictability. If your customers or organization want to pivot soon after a\ndirection is set, your team should be able to do just that. Any\nprocesses or roadblocks that prevent your team’s ability to be flexible and\nembrace change should be removed.\n\n### Deliver value\n\nWe can all agree that teams should deliver value both to customers and the\norganization. But where an Agile mindset makes all the difference is shifting the\nemphasis from the output, which focuses only on the items delivered, to the\noutcome, which is how a feature meets a market need. An Agile mindset helps teams\ncreatively think of how a feature can solve a problem rather than feel pressured\nto deliver a set number of items in a month. It’s the whole “quality over quantity” idea.\n\n## Steps to shift to an Agile mindset\n\nChanging your team’s perspective and the way they approach problems is a difficult\nundertaking. 
You’re challenging their long-held beliefs while requiring them to\ncomplete tasks and meet deadlines. This is an uncomfortable process in any\nenvironment, but especially in the workplace where an (in)ability to quickly\nshift can impact performance and reputation. Fortunately, there are a few\nmethods to help you navigate these difficulties and enable your team to smoothly\nadopt an Agile mindset:\n\n\n1. **Model behavior**: The most effective way to help your team shift to an Agile\nmindset is to exemplify the behaviors you want to see. To create a\n“no-fault, embrace risk” environment, share your setbacks with the team and tell\nthem what you learned. When someone experiments, praise them for trying something\nnew and discuss the biggest lessons learned. By being transparent and showing your\nteam that this new way of thinking is possible, you become their collaborator.\n1. **Storytelling**: Share how other organizations or teams have benefited from\nan Agile mindset. Understanding what others gained from a new way of\nthinking can help your team feel more enthusiastic about the change.\n1. **Take small steps**: After doing more research about an Agile mindset, you\nmight get excited and feel tempted to change things overnight. Take small steps\nand make minor adjustments in the beginning to help your team acclimate.\n\n## What's the impact?\n\nWith an Agile mindset, teams can quickly adjust to changing market needs, respond\nto customer feedback, and deliver business value. 
Adopting a new perspective can\npositively change a team’s culture, since the shift permits innovation without fear,\ncollaboration with ease, and delivery without roadblocks.\n\nCover image by [Benjamin Voros](https://unsplash.com/@vorosbenisop?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/X4bgpcGBNAY)\n{: .note}\n",[855,749,9,683],{"slug":1100,"featured":6,"template":686},"agile-mindset","content:en-us:blog:agile-mindset.yml","Agile Mindset","en-us/blog/agile-mindset.yml","en-us/blog/agile-mindset",{"_path":1106,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1107,"content":1113,"config":1118,"_id":1120,"_type":14,"title":1121,"_source":16,"_file":1122,"_stem":1123,"_extension":19},"/en-us/blog/agile-pairing-sessions",{"title":1108,"description":1109,"ogTitle":1108,"ogDescription":1109,"noIndex":6,"ogImage":1110,"ogUrl":1111,"ogSiteName":670,"ogType":671,"canonicalUrls":1111,"schema":1112},"Improving pair programming with pairing sessions","Pairing with a teammate can increase delivery. Here we look at what pairing sessions are, what they involve and what they're good for.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665897/Blog/Hero%20Images/incrementalcodedevelopment.jpg","https://about.gitlab.com/blog/agile-pairing-sessions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Improving pair programming with pairing sessions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-08-20\",\n      }",{"title":1108,"description":1109,"authors":1114,"heroImage":1110,"date":1115,"body":1116,"category":679,"tags":1117},[702],"2019-08-20","\nArya and Sansa. Han and Chewbacca. Harry and Ron. 
When people team up, great things can happen.\n\n## What is pair programming?\n\nPair programming, an Agile approach to software development, involves two programmers working together at the same workstation. One programmer (called the driver) writes code while the other programmer (called the navigator) reviews code in real time. Pairing sessions can accelerate [Agile delivery](/solutions/agile-delivery/), because teammates work together to find the best solutions to several challenges. \n\nRather than working in silos, team members work together to share knowledge and quickly move through obstacles. Sounds good, right? Well, some organizations view pair programming as an inefficient use of time. After all, why should two developers work on the same piece of code when there’s a mountain of technical debt, an impending release, and lingering OKRs around the corner?\n\n## How to get started with pair programming\n\nThe key to any successful paired programming partnership is open communication and creating a plan together so you can avoid bottlenecks during the project process. \n\nHere are a few things you need to consider as a team before beginning any coding work:\n\n* Have a mutual understanding of what “ready” looks like for this project. Consult each other as well as any stakeholder involved, like a product owner, so that everyone is clear on when to give the projects a final green light. \n* Create a step-by-step project plan. Consider how you will trade off coding and reviewing responsibilities, how you want to handle testing, and any other external help you may need to complete the project. \n* Brainstorm as many potential roadblocks as you can think of in this planning process, and try to come up with potential solutions. You can brainstorm together on paper, talk it out, or go off separately and then share thoughts, but this is an important step. Always be prepared!\n* Agree on the technology you want to use. 
From computers and keyboards to reliable wifi or a whiteboard, make sure you have all of the tools you need.\n\n## Some pair programming best practices\n\nTo achieve the best outcome of your pair programming experience, we recommend you follow these best practices:\n\n* **ABC (Always be communicating).** Regardless of whether you’ve worked well together in the past or you’re a brand new partnership, the importance of communication can’t be overstated. Two individuals are likely to have different thoughts and opinions along the way. To keep the project (and yourselves) from suffering, establish open and frequent communication practices early.\n* **Take turns.** No single person has to be the only one navigating or driving, and you shouldn’t. Take turns in each role as often as you need to make sure your minds and eyes stay fresh and you keep producing quality work.\n* **Take a break.** Rome wasn’t built in a day, and neither was coding. You and your pair programming partner need to make sure to take breaks so as not to induce burnout. \n* **Get good technology tools.** And remember to click that video on. Oftentimes, pair programming is done remotely. It can help to have an actual facetime conversation, even if it’s virtual, to stay connected and communicative throughout the course of the partnership. \n* **Ask for help.** If there is a part of your project that both of you don’t understand, ask for clarification. Better to ask ahead of project completion than after. \n\n## The case for and against pair programming\n\nThere are benefits and drawbacks to pairing sessions, so a few GitLab team members\nshared their thoughts to help you determine whether pair programming is right for you.\n\n> “I've done pair programming in the past. I love it because it helps to bounce\nideas off people, and I find we often could solve ‘bigger’ problems faster. 
To me,\nthe downside is measuring/proving that this is a good method of programming since\nmany people see this as inefficient (two people working on the same problem).” –\n[Cynthia Ng](/company/team/#TheRealArty), senior support agent\n\nToday’s developer feels the pressure of delivering at rapid speeds. Sometimes, a\nchallenge is just too complex for one person to solve, and pairing sessions can\nhelp alleviate the difficulties experienced when racing towards a release while\ncarrying a burdensome issue. Talking through solutions and drawing on each other’s\nexperiences can help a pair work towards a new approach.\n\nMeasuring the effectiveness of pairing sessions might be difficult, but there are ways to\nevaluate success. Considering failures in functionality, the number of\nbugs, and improvements in productivity can help teams determine whether pairing\nmakes a difference with delivery.\n\n### The role of engagement and continuous learning in delivery\n\nIT leaders may be reluctant to embrace pairing, since two developers dedicate\ntheir time to a single problem, but it’s important to note researchers have\nfound that\n[90% of new skills learned are lost due to lack of\nengagement](https://www.wsj.com/articles/SB10001424052970204425904578072950518558328),\nand in an Agile framework, a culture of continuous learning helps improve all aspects of delivery.\n\n> “When I was a junior developer, I found it very helpful to talk through my\nthought process and hear how senior developers approached the same problem. 
But,\nas an introvert, I found it exhausting to do all day, every day.” –\n[Jennie Louie](/company/team/#jennielouie), test automation engineer, Enablement\n\nAgile models often include the value of continuous learning to help everyone –\nfrom C-level to junior level – develop new skills to remain adaptable and productive.\nPairing sessions provide a platform from which teammates can learn in tandem.\n\n> “I’ve never done ‘strict’ pairing with a driver/navigator, only the relaxed kind\nwhere you just chat and sometimes switch keyboards. And while I can't really imagine\npairing full-time, I guess with the right pair and some practice it could indeed be\na great experience.” – [Markus Koller](/company/team/#toupeira), backend engineer, Create:Editor\n\nThe drawbacks to pair programming might make you hesitate, but I encourage you to\ntake a chance on it, especially if you want to accelerate delivery. Here are a\nfew pros and cons of pairing to help you understand the process:\n\n### Advantages of pair programming\n\nDirectly collaborating with a teammate can increase morale and inject fun and\ndiversity in one’s day. By working alongside each other, teammates can learn\ndifferent coding practices, workflow techniques, and new ways of approaching\nproblems, which increases innovation and efficiency and decreases knowledge silos.\n\n> “Pair programming can be great for onboarding, mentoring, and [rubber ducking](https://en.wikipedia.org/wiki/Rubber_duck_debugging)\ndifficult problems, since teammates receive immediate\nfeedback.” – [Andrew Kelly](https://gitlab.com/ankelly), senior security engineer, [Application Security](/topics/devsecops/)\n\nJunior developers benefit when pair programming with senior developers, since they’ll\ngain strong industry knowledge. Meanwhile, senior developers get teaching experience\nand the ability to think critically about solutions.\n\n> “Programming is fairly abstract. 
When you have to explain a concept verbally, it\noften makes you realize you're missing pieces or that there are better\nways to solve problems than your initial idea.” – [Brandon Lyon](/company/team/#brandon_m_lyon), marketing web developer/designer\n\nRegardless of experience level, everyone can benefit from pairing sessions, since\nthere is no right answer in programming. I consider software development a multi-faceted\nendeavor in which imagination and creativity are driving forces. Based on knowledge,\nexperience, and learning styles, people approach some aspects of code with\na different understanding of how it ties into existing systems. When pairing, people can\ndiscuss these perspectives and assess which approach is best.\n\n### Disadvantages of pair programming\n\nPairing might sound like the solution to many of your delivery problems, but it’s\nnot all roses and rainbows.\n\nGiven the success of pairing, teammates might be tempted to join forces a little\ntoo often. Pair programming can feel inefficient if overdone or used for tasks\nsuch as boilerplate code, smaller and well-defined changes, and [yak shaving](https://www.techopedia.com/definition/15511/yak-shaving).\n\n> “Pair programming is not a silver bullet. Some software solutions just need a\nsingle person to hunker down and work it out before sharing with others.” – [Andrew Kelly](https://gitlab.com/ankelly)\n\nIf teams are just starting out with pairing, it can take practice and patience\nto be a “good pair,” which can be difficult even for experienced pair programmers.\nDo retros after a pairing session to understand what worked well, what didn’t work,\nand how you can improve future sessions.\n\n## See it in action\n\nNow that you know a bit more about pair programming, you might feel ready to take\nthe plunge. At GitLab, we 💖 pairing. 
Most pairing sessions occur when developers\nwork at the same station, but as an [all-remote company](/company/culture/all-remote/),\nwe’ve found ways to make it work.\n\n> “Remote pair programming can be tougher than in-person pairing. Distance plus the\ntooling isn’t always the best, but it’s not impossible.” – [Andrew Kelly](https://gitlab.com/ankelly)\n\nGitLab’s Support team created a [dedicated project and issue templates for pairing\nsessions](https://gitlab.com/gitlab-com/support/support-training/issues?label_name%5B%5D=pairing).\n\n> “In Support, we do pairing sessions (or group ‘crush sessions’) and find we often\nget through _more_ tickets when working together, so it's something we're tracking\nas a milestone for each quarter.” – [Cynthia Ng](/company/team/#TheRealArty)\n\nOver in engineering, the Frontend team has also been [experimenting with how to support\npair programming](https://gitlab.com/gitlab-org/frontend/general/issues/12). The\nteam has used VSCode live share a few times but enjoys open discussion and sending\npatches to each other.\n\n> “The best format so far is someone posts a \"🍐 request\" in the #frontend_pairs\nSlack channel – people show interest – a time is scheduled on the calendar – then\nwe do somewhat of a mob programming session.” – [Paul Slaughter](/company/team/#pslaughter), frontend engineer, Create:Editor\n\nEvery software team hears the importance of acceleration, and it can be a daunting\nthought, especially when faced with complex problems. The next time you find\nyourself dragging your fingers across the keyboard and dreading that next line of\ncode, consider pairing up with a teammate to tackle issues together.\n\n> “Pairing will look different for everyone. 
Anything that encourages\ncommunication, engaged knowledge sharing, and breaking our engineering silos is\ngood.” – [Paul Slaughter](/company/team/#pslaughter)\n\nCover image by [Jonathan Mast](https://unsplash.com/@jonathanmast) on [Unsplash](https://unsplash.com/photos/RW6Wz9QaoKk)\n{: .note}\n",[855,749,9,683],{"slug":1119,"featured":6,"template":686},"agile-pairing-sessions","content:en-us:blog:agile-pairing-sessions.yml","Agile Pairing Sessions","en-us/blog/agile-pairing-sessions.yml","en-us/blog/agile-pairing-sessions",{"_path":1125,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1126,"content":1132,"config":1139,"_id":1141,"_type":14,"title":1142,"_source":16,"_file":1143,"_stem":1144,"_extension":19},"/en-us/blog/agile-planning-with-a-devops-platform",{"title":1127,"description":1128,"ogTitle":1127,"ogDescription":1128,"noIndex":6,"ogImage":1129,"ogUrl":1130,"ogSiteName":670,"ogType":671,"canonicalUrls":1130,"schema":1131},"Agile planning with a DevOps platform","How a DevOps platform enables an entirely different way to plan and manage work","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669233/Blog/Hero%20Images/photo-1531403009284-440f080d1e12.jpg","https://about.gitlab.com/blog/agile-planning-with-a-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Agile planning with a DevOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cormac Foster\"}],\n        \"datePublished\": \"2021-05-19\",\n      }",{"title":1127,"description":1128,"authors":1133,"heroImage":1129,"date":1135,"body":1136,"category":1137,"tags":1138},[1134],"Cormac Foster","2021-05-19","\n\nSeveral years ago, a portfolio manager asked me if he needed to learn about “all the stuff the [DevOps](/topics/devops/) people do.” I told him yes, explained why it was worth it to “learn their language,” and discussed how he could extract nuggets of information to 
help unlock product value. It was good advice at the time, but it didn’t answer the bigger question—“Sure, he *should*, but should he *have to*?” \n\nThe answer to that question is no. He already had a job—managing a P&L for several products. He shouldn’t have to learn another job just to do that one well. \n\nTools are rarely the solution, but they’re often the problem. At the time, without custom integration, lots of digging, manual translation, and a little bit of luck, there just wasn’t a good way to surface all the information the portfolio manager needed to do his job well. At best, he’d receive batched reports from different tools in his DevOps toolchain, with none of them connected to the tools where decisions were made. So putting on a DevOps hat was the best compromise.\n\nTimes have changed for the better. DevOps and Agile have matured. We’ve established best practices, we know how and when to deviate from them, and we have an idea how we’d like to improve them. On the tool side, that means we’re ready to ditch those toolchains for a platform.\n\nGitLab was the first [DevOps platform](/solutions/devops-platform/) — designed as a single application from the beginning — but platform evolution is nothing new. Salesforce combines what used to be a disparate toolchain with massive integration overhead into a CRM platform that anyone, in any role, can use to boost productivity. Recently, the industry seems to have started to endorse the trend toward DevOps platforms. Last year, Gartner identified a new market in its [2020 Market Guide for Value Stream Delivery Platforms](https://about.gitlab.com/analysts/gartner-vsdp21/), in which GitLab was a Representative Vendor. \n\n![Epic roadmap view in GitLab](https://about.gitlab.com/images/blogimages/epic_roadmap.png \"Status rollups in epic roadmaps are always up-to-date\")\n\nWe’re excited to see industry experts recognize that we’ve reached the next stage of evolution. 
But what does a DevOps platform mean for that portfolio manager, or a product owner, or anyone else focused on the “business” end of business? Quite a lot, actually. It means:\n\n* Accuracy: When the work happens inside the same system of the planning, there is no lost data at API chokepoints, no delayed outputs from batch processes, and no doubt that the status rollups for an epic are anything but up-to-date.\n* Visibility: When you need more than a roll-up of an initiative’s status, a DevOps platform lets you inform your planning by clicking through into any level of detail — down to actual code changes or security and performance scan results.\n* Efficiency: Contextual drilldowns mean never again having to sift through spreadsheets full of useless-to-you data just to find that one thing you need.\n* Actionability: “Reporting” is so 20th century. A DevOps platform lets you learn, plan, and execute in the same system, removing blockers, collaborating, and adjusting course without losing any context or time.\n* Delivery speed: Fewer resources spent maintaining integrations means more developers and ops personnel focused on actually delivering value to your users.\n\nDon’t just take our word for it: look at customers like [British Geological Survey](/customers/bgs/), which uses GitLab to collaborate across roles.\n\n> *“GitLab has become our central place to store code and issues. It's become a mission critical system for our organization.”*\n>\n> **Wayne Shelley**, DevOps integration leader, BGS\n\nIndustry experts are responding to our approach. In its [2021 Magic Quadrant for Enterprise Agile Planning Tools](https://learn.gitlab.com/2021-mq-eapt), Gartner named GitLab a Leader for the first time. We’re proud of the recognition, but we’re even more excited to continue to build on our unique take on Agile planning in the future — and you’re a part of that future. 
Please read our planning [vision](https://about.gitlab.com/direction/plan/#our-vision-of-a-loveable-solution) and contribute!\n\n_Gartner, Magic Quadrant for Enterprise Agile Planning Tools, Bill Blosen, Mike West, Deacon D.K Wan, Akis Sklavounakis, Keith Mann, Wan Fui Chan, Hassan Ennaciri, 20 April 2021_\n\n_Gartner does not endorse any vendor, product or service depicted in its research publications and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s research organization and should not be construed as statements of fact. Gartner disclaims all warranties, expressed or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose._\n\nCover image by [Alvaro Reyes](https://unsplash.com/@alvarordesign) [](https://unsplash.com/@martinsanchez?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)on [Unsplash](https://unsplash.com/photos/qWwpHwip31M)\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Making the case for a DevOps platform: What data and customers say](/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say/)\n\n- [Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)\n\n- [It's time to build more accessible software. 
A DevOps platform can help](/blog/how-the-devops-platform-makes-building-accessible-software-easier/)\n","agile-planning",[855,749,9],{"slug":1140,"featured":6,"template":686},"agile-planning-with-a-devops-platform","content:en-us:blog:agile-planning-with-a-devops-platform.yml","Agile Planning With A Devops Platform","en-us/blog/agile-planning-with-a-devops-platform.yml","en-us/blog/agile-planning-with-a-devops-platform",{"_path":1146,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1147,"content":1153,"config":1159,"_id":1161,"_type":14,"title":1162,"_source":16,"_file":1163,"_stem":1164,"_extension":19},"/en-us/blog/ai-in-software-development",{"title":1148,"description":1149,"ogTitle":1148,"ogDescription":1149,"noIndex":6,"ogImage":1150,"ogUrl":1151,"ogSiteName":670,"ogType":671,"canonicalUrls":1151,"schema":1152},"How AI will change software development","AI has made self-driving cars possible, so what about self-writing code? We asked 14 DevOps practitioners, industry analysts and execs to share their take on how AI will impact software development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681709/Blog/Hero%20Images/future-of-software-ai.png","https://about.gitlab.com/blog/ai-in-software-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How AI will change software development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-10-28\",\n      }",{"title":1148,"description":1149,"authors":1154,"heroImage":1150,"date":1155,"body":1156,"category":679,"tags":1157},[851],"2020-10-28","\n\n_This is the third in our four-part series on the future of software development. 
Part one examines [the changing developer role](/blog/software-developer-changing-role/), part two takes a deep dive into [emerging technologies with the potential to impact development](/blog/how-tomorrows-tech-affects-sw-dev/), and part four shares [how to future-proof your developer career](/blog/future-proof-your-developer-career/)._\n\nArtificial intelligence has often been dismissed as a promising technology breakthrough that somehow remains out of reach, particularly when it comes to software development. The [role of AI in software development](/topics/devops/the-role-of-ai-in-devops/) has been written about for years and not much substantive has come of it.\n\nBut the stars may be aligning now. Developers are intrigued, and we can see that by looking at the growing popularity of the Python programming language. Stack Overflow’s [annual survey](https://insights.stackoverflow.com/survey/2020) shows Python’s rise in \"popularity\" and \"interest\" based on the number of questions members asked about it. It’s certainly the go-to language for [ML-powered chat bots](https://medium.com/better-programming/software-developer-trends-of-2020-and-beyond-d1b955bc46b8).\n\nAnd in our [2020 Global DevSecOps Survey](/developer-survey/), close to one-quarter of developers surveyed said that an understanding of AI/ML will be the most important skill for their future careers. And roughly 16% of testers said their teams are using bots right now or have an AI/ML tool in place for testing.\n\nSo if Tesla can create a self-driving car, can self-writing code be that far off? The short answer is no, at least according to the more than a dozen [DevOps](/topics/devops/) practitioners, industry analysts, and GitLab executives we spoke with about the future of software development. Here’s what they're thinking.\n\n## A gradual process\n\nAt GitLab AI feels like it will happen but gradually. 
\"Every set of software in the future is going to be the combination of some procedural code and some (AI) models,\" says GitLab CEO [Sid Sijbrandij](/company/team/#sytses). \"The models will eat more and more of the code over time.\" But Sid sees AI’s role as \"less of a distinct activity and more of an integrated call out to a library or a call out to a model.\"\n\nTo put it another way, senior developer evangelist [Brendan O’Leary](/company/team/#brendan) thinks it would be strange if AI weren’t playing a much more significant (and helpful) role in code development ten years from now. \"But this isn’t going to replace humans – it’s going to make the human role more critical to understand what’s important,\" Brendan says. He likens it to a detail-oriented second set of eyes that can sort through all the data quickly to focus coders on areas that need it. \"Computer-aided detection is really valuable in mammography because it’s hard to look for 1 millimeter specs of white,\" Brendan explains. \"Computer-aided detection is valuable because it surfaces the 'second look' areas to focus on. That’s the model I think we can expect when it comes to AI and software development.\"\n\n[Carlos Eduardo Arango Gutierrez](https://www.linkedin.com/in/eduardo-arango/?originalSubdomain=co), a software engineer at Red Hat (and a [GitLab Hero](/community/heroes/)) sees a big benefit to a bot \"colleague\" that will not only ID problems but will suggest solutions. \"I'm waiting for a bot that says 'oh your code is wrong and this is how you fix it,'\" Carlos says. \"You're no longer stuck because the bot is going to run the test for you and fix it.\"\n\n## Meet the Turing Bots\n\nSo there’s clearly a backstop/code testing/QA role for AI in software development, but there is more to it than that, according to Forrester Research. 
In its September 2020 webinar, \"The Future of Software Development: How AI Will Automate More Than 70% of Software Development,\" [Diego Lo Giudce](https://www.linkedin.com/in/diego-lo-giudice-52232/detail/recent-activity/posts/) and [Mike Gualtieri](https://www.linkedin.com/in/mgualtieri/), both vice presidents and principal analysts, make the case that so called \"Turing Bots\" will be generating code from software artifacts in ten years, or less. The technologies driving the bots include autonomous testing, auto ML (for predicting), reinforcement learning, and machine coding, the webinar says.\n\nThat’s a bold prediction and a lot to unpack for today's DevOps teams. It will be a process and culture shift, certainly, but it will also require sweeping changes in the developer thought process. Forrester recommends developers start now to \"define more precise artifacts and patterns, including app requirements, UX design and solution architecture.\"\n\n## Now take a deep breath\n\nIt’s important to remember, though, that AI is only as good as the data fed to it by humans – it’s not a substitute *for* humans. [Jose Manrique Lopez de la Fuente](https://www.linkedin.com/in/jose-manrique-lopez-de-la-fuente-b869884/), CEO at Bitergia, and also a GitLab hero, puts it this way: \"I don’t believe that we won’t need developers any more,\" he says. \"Artificial intelligence is not intelligent.\"\n\n_Wondering if your skills will keep you relevant in a time of AI overlords? 
Don’t miss our look at skills critical to a DevOps team's future in the fourth part of our series on the future of software development._\n",[9,813,1158],"testing",{"slug":1160,"featured":6,"template":686},"ai-in-software-development","content:en-us:blog:ai-in-software-development.yml","Ai In Software Development","en-us/blog/ai-in-software-development.yml","en-us/blog/ai-in-software-development",{"_path":1166,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1167,"content":1173,"config":1182,"_id":1184,"_type":14,"title":1185,"_source":16,"_file":1186,"_stem":1187,"_extension":19},"/en-us/blog/ai-ml-in-devsecops-series",{"title":1168,"description":1169,"ogTitle":1168,"ogDescription":1169,"noIndex":6,"ogImage":1170,"ogUrl":1171,"ogSiteName":670,"ogType":671,"canonicalUrls":1171,"schema":1172},"AI/ML in DevSecOps Series","This blog series chronicles our journey to integrate AI/ML throughout the software development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682693/Blog/Hero%20Images/ai-ml-in-devsecops-blog-series.png","https://about.gitlab.com/blog/ai-ml-in-devsecops-series","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"AI/ML in DevSecOps Series\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab AI Assisted Group\"}],\n        \"datePublished\": \"2023-04-24\",\n      }",{"title":1168,"description":1169,"authors":1174,"heroImage":1170,"date":1176,"body":1177,"category":1178,"tags":1179},[1175],"GitLab AI Assisted Group","2023-04-24","\n\nOur \"AI/ML in DevSecOps\" series tracks GitLab's journey to build and integrate AI/ML into our DevSecOps platform. Throughout the series, we’ll feature blogs from our product, engineering, and UX teams to showcase how we’re infusing AI/ML into GitLab. 
So be sure to bookmark this page and follow along.\n\nThis series details [many features introduced during our AI Fireside Chat](/blog/gitlab-ai-assisted-features/) on May 3, 2023.\n\nGet a [full overview of our AI-powered DevSecOps platform](https://about.gitlab.com/solutions/ai/). \n\n1. [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/)\nGitLab users already benefit from a step-function increase in productivity when they adopt our platform: streamlined collaboration, operational efficiencies, and massive acceleration in time to delivery. But by introducing machine learning (ML) and other artificial intelligence (AI) capabilities into the fabric of The DevSecOps Platform feature set, we aim to take those gains to a whole new level.\n\n2. [How AI-assisted code suggestions will advance DevSecOps](/blog/ai-assisted-code-suggestions/)\nAI-assisted code suggestions can substantially improve the programming experience by reducing errors and helping programmers write code faster, which will help reproduce the much higher production code quality.\n\n3. [ML experiment: Writing SQL is about to get a lot easier](/blog/ml-experiment-sql/)\nWith the recent advancements in AI and natural language processing, it's now possible for AI models to generate SQL code from simple English language queries. This means that even people without a deep understanding of SQL can generate complex queries to analyze their data. This technology not only improves accessibility but can also save valuable time and effort for data analysts.\n\n4. [ML experiment: Explain this source code](/blog/explain-this-code/)\nDeciphering the source code of a new software project can be a daunting or at least time-consuming task. The code may be poorly documented, or it may be written in a programming language that is unfamiliar to the developer. Even if the developer is familiar with the programming language, the code may be complex and difficult to understand. 
But what if developers had a helpful tool to figure out very quickly what code was doing? With recent advancements in AI models, it's now possible to have code explained in natural language.\n\n5. [ML experiment: Summarizing issue comments](/blog/summarize-issues/)\nLarge language models (LLMs) power generative AI solutions by using deep learning algorithms to analyze vast amounts of natural language data. These models are trained on massive datasets to develop an understanding of language patterns and context. Once trained, the models can generate new text that mimics human language. In a rapid prototype, our own Alexandru Croitor, Senior Backend Engineer, and Nicolas Dunlar, Senior Frontend Engineer for our Plan stage, leverage generative AI LLMs to power comment summarization within GitLab's issues.\n\n6. [ML experiment: Summarize merge request changes](/blog/merge-request-changes-summary-ai/)\nMerge requests are the central point of collaboration for code changes in GitLab. They often contain a variety of changes across many files and services within a project. Often, merge requests communicate the intent of the change as it relates to an issue being resolved, but they might not describe what was changed to achieve that. As review cycles progress, the current state of the merge request can become out of sync with the realities of the proposed changes and keeping people informed. We believe that we can leverage AI and large language models (LLMs) to help provide relevant summaries of a merge request and its proposed changes, so reviewers and authors can spend more time discussing changes and less time keeping descriptions updated.\n\n7. [ML experiment: Generate tests for code changes](/blog/merge-request-suggest-a-test/)\nProposing changes and new features via merge requests is great, but what about the tests? Sometimes, tests can be the hardest part of any code change you make. 
We are leveraging generative AI to enable developers to create tests for merge request changes helping reduce the laborious but important task of writing tests increasing test coverage. \n\n8. [ML experiment: Explain this vulnerability](/blog/explain-this-vulnerability/)\nSecurity vulnerabilities aren't always easy to understand, especially for developers without experience or training with cybersecurity. We're leveraging AI to help developers understand security vulnerabilities and even get guidence on how to resolve them.\n\n9. [ML experiment: Use a chatbot to answer how-to questions](/blog/gitlab-chat-ai/)\nLarge language models (LLMs) have changed the way everyday people interact with large volumes of text. We thought, why not train an LLM on GitLab's extensive documentation to help users quickly answer natural language questions. Gone are the days of endless searching through vast documentation sites.\n\n10. [Track ML model experiments with new GitLab MLFlow integration](/blog/track-machine-learning-model-experiments/)\nModel experiments allow data scientists to track different variations of machine learning models directly on GitLab.\n\n11. [Code Suggestions available to all GitLab tiers while in Beta](/blog/code-suggestions-for-all-during-beta/)\nWe've made code suggestions available to all plans for free during Beta. Also, learn about recent updates to Code Suggestions.\n\n12. [ML experiment: Summarize my merge request review](/blog/summarize-my-merge-request-review/) \nLearn how GitLab is experimenting with ML-powered merge request review summaries.\n\n13. [How Code Suggestions can supercharge developers' daily productivity](/blog/code-suggestions-improves-developer-productivity/)\nLearn how you can use GitLab Code Suggestions to accelerate your development.\n\n14. 
[ML experiment: Extending Code Suggestions to more development environments](/blog/extending-code-suggestions/)\nLearn how we're expanding Code Suggestions to support Visual Studio, \nJetBrains IDEs, Neovim, and other development environments.\n\n15. [Train and deploy AI models with GitLab and Google Vertex AI](/blog/training-and-deploying-ai-models-with-gitlab-and-vertex-ai/)\nA demonstration of GitLab's DevSecOps capabilities combined with Vertex AI's scalable ML platform, designed with the aim of rapid and secure AI deployments. \n\n16. [Self-managed support for Code Suggestions (Beta)](/blog/self-managed-support-for-code-suggestions/)\nOne of our most popular customer requests – self-managed support for Code Suggestsions (Beta) – is expected to ship soon in GitLab 16.1. Learn how it will work.\n\n17. [Meet GitLab Duo - The suite of AI capabilities powering your workflows](/blog/meet-gitlab-duo-the-suite-of-ai-capabilities/)\nLearn about GitLab Duo, an expanding toolbox of features integrated directly into the GitLab platform to assist DevSecOps teams.\n\n18. [GitLab for Visual Studio, including code suggestions, available in Beta](/blog/gitlab-visual-studio-extension/)\nGitLab for Visual Studio extension supports GitLab Duo code suggestions for both GitLab SaaS and GitLab self-managed.\n\n19. [Empower ModelOps and HPC workloads with GPU-enabled runners integrated with CI/CD](/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners)\nLearn how to leverage our GitLab-hosted GPU-enabled runners for ModelOps and high-performance computing workloads.\n\n20. [GitLab Duo Code Suggestions for JetBrains and Neovim](/blog/gitlab-jetbrains-neovim-plugins/) GitLab plugins for JetBrains IDEs and Neovim are now available in Beta,\nbringing GitLab Duo Code Suggestions to more software development environments.\n\nWant to learn even more about AI/ML? 
Check out our [AI Assisted Group direction page](/direction/modelops/ai_assisted/) and more [AI/ML articles](/blog/tags.html#AI/ML).\n","ai-ml",[9,1180,916,1181],"product","AI/ML",{"slug":1183,"featured":6,"template":686},"ai-ml-in-devsecops-series","content:en-us:blog:ai-ml-in-devsecops-series.yml","Ai Ml In Devsecops Series","en-us/blog/ai-ml-in-devsecops-series.yml","en-us/blog/ai-ml-in-devsecops-series",{"_path":1189,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1190,"content":1196,"config":1202,"_id":1204,"_type":14,"title":1205,"_source":16,"_file":1206,"_stem":1207,"_extension":19},"/en-us/blog/align-business-strategy-and-app-delivery",{"title":1191,"description":1192,"ogTitle":1191,"ogDescription":1192,"noIndex":6,"ogImage":1193,"ogUrl":1194,"ogSiteName":670,"ogType":671,"canonicalUrls":1194,"schema":1195},"Deliver business value at the speed of business","Read here on how DevOps helps delivering business value with faster cycle times","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671258/Blog/Hero%20Images/just-commit-blog-cover.png","https://about.gitlab.com/blog/align-business-strategy-and-app-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Deliver business value at the speed of business\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jeremiah\"}],\n        \"datePublished\": \"2019-04-23\",\n      }",{"title":1191,"description":1192,"authors":1197,"heroImage":1193,"date":1199,"body":1200,"category":679,"tags":1201},[1198],"John Jeremiah","2019-04-23","\n\nWhat’s the point of DevOps and digital transformation? Is this just another “IT project”\nwith limited business value, or will they deliver _real_ value to the business?\n\nThe goal of digital transformation is to change the business with new models,\nnew services, new value, and new ways to connect with customers. 
Consider the\nobservations of the\n[World Economic Forum’s Digital Transformation Initiative](http://reports.weforum.org/digital-transformation/),\nwhere they argue that a digital transformation will lead to improved customer\nexperience and outcomes, efficiencies, and business models. One of the key ways\nto enable these changes is “agile and digital-savvy leadership” and a technology\ninfrastructure that is ready to respond to changing demands.\n\n**In order to succeed in your digital transformation strategy, you must be able\nto transform your technology delivery processes and platforms.**\n\n## What’s the solution?\n\nThe good news is that many of the key techniques to facilitate faster and more\nresponsive delivery are known. For the past decade, enterprises large and small\nhave found success with adopting DevOps principles to extend Agile project\nplanning to deliver business value at the speed of business. In many ways, DevOps\nis one of the key enablers to unlock the velocity needed for delivery teams to\nrespond to rapidly changing business objectives.\n\nAny sort of IT transformation, such as DevOps, must be defined as a business\ninitiative with tangible business outcomes. However, too often, initiatives like\nAgile and DevOps are relegated to be backbench, IT-focused projects that are set\nup to fail. If your Agile or DevOps transformation project isn’t closely linked\nto business objectives, or if it doesn’t have business stakeholders, then it’s\ntime to go back to the drawing board and re-make the business case to sell the\nvision. As IT leaders, you cannot go it alone.\n\n## Taking a closer look at your value stream\n\nSo, how do you operate and stay focused on business objectives as you accelerate\napplication delivery?  I’ve heard from many customers who find their\n“portfolio planning” process and tools disconnected from the actual work developers\nand delivery teams do. 
The problem they face is not having visibility into the\ncadence and delivery of new features and capabilities that the business has requested.\nWhile they try to integrate disparate tools to keep track of everything, they\nultimately end up using a patchwork spreadsheets/PowerPoint hybrid to create\ndashboards and reports in the hopes of keeping executives informed. It’s a waste\nof effort, error prone, and frustrating to pull all the data together over and over again.\n\nTo solve this alignment puzzle you need three things:\n\n1. Effective visibility and traceability between business initiatives, coding, and delivery.\n1. Commitment to Agile planning and prioritization.\n1. Automation of manual, error-prone tasks, such as testing, configuring, reporting, and tracking activities.\n\nLet's dig into those:\n\n### Increase visibility\n\nThe first step in achieving success is breaking through the barriers to\ncommunication and collaboration in your organization. Too many different tools,\nspreadsheets, PowerPoint decks, and islands of information create friction and\nconfusion. You need to consider how you can align your policies, processes, and\ntechnology enablers to encourage collaboration, sharing, and visibility into business\ninitiatives. Only then will you be able to respond to the rapidly changing business needs.\n\n### Simplify workflows\n\nIf visibility is the first step in your transformation, then your second step is\nembracing the reality that yesterday’s business plans and priorities may well\nchange tomorrow. The days of annual planning and long-running projects that\ndeliver only after months of effort are gone. The pace of change in the market\ndemands a comparable level of flexibility in our planning and prioritization.\n\n### Favor automation\n\nIf your most valuable assets are your people, then don’t ask them to waste their\ntime and talents on routine manual effort. 
To improve your ability to accelerate\napplication delivery, you need to examine your processes and policies and\nautomate your manual, repetitive, low-value tasks. This will unlock the untapped\npotential in your team while speeding up your pipeline and reducing error rates.\nThe power of [modern automation](/blog/application-modernization-best-practices/) is a\nkey driver to deliver at the speed of business.\n\nA successful transformation is not only possible, but also crucial to long-term\nsuccess in a market that is moving at a radically faster pace than it was a few\nyears ago. Now is the time to start.\n\nJoin us for an upcoming webinar in which we'll learn how\nsoftware delivery leaders play a vital role in the success of digital transformations.\n\n[Register now](/webcast/justcommit-reduce-cycle-time/)\n{: .alert .alert-gitlab-purple .text-center}\n",[9,683],{"slug":1203,"featured":6,"template":686},"align-business-strategy-and-app-delivery","content:en-us:blog:align-business-strategy-and-app-delivery.yml","Align Business Strategy And App Delivery","en-us/blog/align-business-strategy-and-app-delivery.yml","en-us/blog/align-business-strategy-and-app-delivery",{"_path":1209,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1210,"content":1216,"config":1223,"_id":1225,"_type":14,"title":1226,"_source":16,"_file":1227,"_stem":1228,"_extension":19},"/en-us/blog/align-engineering-security-appsec-tests-in-ci",{"title":1211,"description":1212,"ogTitle":1211,"ogDescription":1212,"noIndex":6,"ogImage":1213,"ogUrl":1214,"ogSiteName":670,"ogType":671,"canonicalUrls":1214,"schema":1215},"How Developer-Centric AppSec Testing Transforms DevOps Teams","Find and fix security bugs faster by implementing developer-centric application security testing in the CI pipeline. And the bonus? 
Engineering and security will finally be better aligned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681513/Blog/Hero%20Images/stackhawk.jpg","https://about.gitlab.com/blog/align-engineering-security-appsec-tests-in-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How developer-centric AppSec testing can dramatically change your DevOps team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joni Klippert\"}],\n        \"datePublished\": \"2020-08-21\",\n      }",{"title":1217,"description":1212,"authors":1218,"heroImage":1213,"date":1220,"body":1221,"category":791,"tags":1222},"How developer-centric AppSec testing can dramatically change your DevOps team",[1219],"Joni Klippert","2020-08-21","\n\nSoftware development has accelerated dramatically over the past decade. As [DevOps](/topics/devops/) became pervasive, companies went from shipping software monthly to shipping software to production frequently throughout the day. This happened as engineering teams took ownership of the deployment, performance, and resilience of their software. \n\nAnd it has paid off. Companies that have adopted DevOps are deploying software significantly faster, ultimately driving business value as innovation is more rapidly delivered to customers.\n\nSecurity, however, did not keep up. Security teams typically fell into one of two positions - the blocker of frequent deployments or the team perpetually bringing up issues in last month’s work. The need for a shift in the security model is widely known. 
It was the subject of the [2019 Black Hat Conference keynote](https://www.blackhat.com/us-19/briefings/schedule/index.html#every-security-team-is-a-software-team-now-17280), stats from GitLab’s [2020 Global DevSecOps Survey](https://about.gitlab.com/resources/downloads/2020-devsecops-report.pdf) make this obvious, and we’ve [shared our opinions](https://www.stackhawk.com/blog/application-security-is-broken/) at StackHawk.\n\nI believe there is a solution (or at least a *huge* step in the right direction)... developer-centric [application security](/topics/devsecops/) tooling in the CI pipeline.\n\n## The CI pipeline aligns engineering and security\n\nWhile some in the industry have been debating the term DevSecOps, leading companies have started adopting developer-first security tooling that brings alignment through the CI pipeline. Instrumented correctly, it ensures that security bugs are caught before they hit production and that the fix cycle is drastically shortened.\n\nThe legacy model has security teams running application security tests against production environments. These sort of checks are great if they are your backstop. But if this is the primary way of assessing your application’s security posture, you need to catch up with modern engineering practices. \n\nModern teams are running checks on each microservice that makes up the customer facing application, catching bugs in pipeline, and equipping developers with the information to self serve fixes and triage issues. Fix times are significantly shorter, as developers are still in the context of the code they were working on. By testing microservices vs. the end state application, the underlying bugs are much easier to find and fix. And with developer-centric tooling, developers can fix bugs themselves instead of cycling through siloed internal processes. This structure better aligns each function with their best skill sets. 
Engineers know the application the best and are most equipped to fix, and security teams are able to focus on strategy instead of Jira ticket creation.\n\nThe key is to get the instrumentation right (read: don’t break the build for stupid stuff).\n\n## Application security tests in CI\n\nThat sounds great in theory, but what does it look like in practice? Getting started is actually more simple than it seems. We suggest adding three application security tests to start:\n\n## Software composition analysis (SCA)\n\nSCA identifies the open source dependencies in your code base and compares that against a database of known security vulnerabilities. Some tools automatically create pull requests to patch outdated libraries. Open source use is exponentially growing, especially with chained dependencies. SCA is incredibly important, but also can be noisy with non-exploitable findings.\n\nSome of the leading vendors in the space are [GitLab](/) and [Snyk](https://snyk.io/), with up and comers like [FOSSA](https://fossa.com/) also worth paying attention to.\n\n## Dynamic application security testing (DAST)\n\nDAST runs security tests against your running application, from localhost to CI to production. The beauty of DAST is that it most closely resembles what an attacker would see, by attacking your running application and reducing false positives. The two things to be sure of as you start testing with DAST is that your scanner is finding all of your paths and API endpoints and that it is able to scan as an authenticated user.\n\nGitLab provides DAST checks for Ultimate tier customers. If you want more robust scanning options and additional functionality to manage and fix bugs, [StackHawk](https://www.stackhawk.com) is the only place to turn (obviously I’m biased here). 
Other solutions include legacy vendors such as [Rapid7](https://www.rapid7.com/) or open source leader [ZAP](https://www.zaproxy.org/).\n\n## Secrets detection\n\nFinally, you’ll want to ensure that you have detection for leaked secrets in code. This tooling looks for credentials, keys, or other secrets that may have unintentionally been committed to the code base by developers. GitLab includes [secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/) in their GitLab Ultimate security tooling.\n\n## Getting started\n\nOftentimes, the thought of adding application security tests to the development workflow feels insurmountable. With a long list of priorities, engineering leadership will sometimes put this off. The reality, however, is that it is not that hard.\n\nAt StackHawk, we see many customers completing their first successful scans within 15 minutes of sign up and instrumentation in CI is literally as easy as adding [a few lines of YAML](https://docs.stackhawk.com/continuous-integration/) to your build.\n\nHere is our recommended playbook of how to get started with AppSec in CI. While this is specific to StackHawk, the principles can be applied to other tools as well.\n\n### Step 1: local testing and config\nAfter signing up and grabbing your API key, start iterating on [configuration](https://docs.stackhawk.com/hawkscan/configuration/) while testing against your application on localhost. This allows you to quickly adjust config and get successful authenticated scans running.\n\n### Step 2: non-blocking CI instrumentation\nAfter you’ve ironed out the configuration locally, add the test to your CI pipeline. At this point, it is strongly recommended to instrument as a non-blocking test so that you can triage any existing findings and smooth out any kinks.\n\n#### Step 3: bug triage - fix critical issues in flight, backlog and discuss the rest\nAfter your first non-blocking CI run, start triaging any initial findings. 
Any bugs marked as High criticality should likely be fixed with some sense of urgency. Lows and Mediums should be triaged depending on your application and the bugs, either quickly addressed or added to a backlog for review. Existing findings should not be the blocker for you instrumenting checks to ensure that new bugs don’t get shipped to production.\n\n#### Step 4: switch to blocking tests\nAfter ironing out config locally and in CI, and then triaging initial findings, it is time to finalize the roll out. Switch the StackHawk test to blocking mode to ensure that new security bugs don’t hit production. You can set the scanner to break on High or Medium and High, which depends on your business and the nature of the application. With this in place, you can be confident that production-ready applications have been scanned for security.\n\n## Cultural shifts: it is more than CI\nThe CI pipeline is the natural hingepoint to start aligning engineering and security. A cultural shift, however, is absolutely needed. (If you're doubtful about this, here's a frank look at why [dev and sec don't get along](/blog/developer-security-divide/).) Modern engineering teams recognize that delivering a secure application is part of quality engineering. Engineers aren’t comfortable shipping applications with UI bugs, and they shouldn’t accept security holes either. \n\nSecurity, on the other hand, needs to shift from the blocker to speedy development and to the enabler of safety in an environment of high speed delivery. 
Modern security engineers are ensuring that their teams are working with safe-by-default frameworks, are equipped with developer-centric tooling, and that there are proper integration tests for business logic that can’t be tested by external tooling.\n\nWhile there is significant catch up needed, it is encouraging to see the leading software teams out there testing application security on every build.\n\n## Dig deeper\n\nTo learn more about adding AppSec tests to your CI build, join me at my [How Security Belongs in DevOps](https://sched.co/dUWD) talk at GitLab Commit on August 26th. You can also always sign up for a [free StackHawk trial or demo](https://www.stackhawk.com) or talk to your GitLab sales representative about the security features in GitLab Ultimate. And for the best of both worlds, check out more details on running [automated security testing with StackHawk in GitLab](https://docs.stackhawk.com/continuous-integration/gitlab.html).\n\n_Joni Klippert is founder & CEO of StackHawk, a software-as-a-service company built to help developers find and fix security vulnerabilities in their code. Joni has been building software for developers for more than 10 years, previously serving as VP Product, VictorOps from seed stage to acquisition by Splunk. Joni is a Colorado native and holds an MBA from the University of Colorado. 
She currently lives in Denver with her fiance Jason and Whippet \"Q\"._\n\nCover image by [Adi Goldstein](https://unsplash.com/@adigold1) on [Unsplash](https://unsplash.com)\n{: .note}\n\n\n\n",[109,749,9,875,1158,683],{"slug":1224,"featured":6,"template":686},"align-engineering-security-appsec-tests-in-ci","content:en-us:blog:align-engineering-security-appsec-tests-in-ci.yml","Align Engineering Security Appsec Tests In Ci","en-us/blog/align-engineering-security-appsec-tests-in-ci.yml","en-us/blog/align-engineering-security-appsec-tests-in-ci",{"_path":1230,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1231,"content":1237,"config":1244,"_id":1246,"_type":14,"title":1247,"_source":16,"_file":1248,"_stem":1249,"_extension":19},"/en-us/blog/amazon-linux-2-service-ready-partner",{"title":1232,"description":1233,"ogTitle":1232,"ogDescription":1233,"noIndex":6,"ogImage":1234,"ogUrl":1235,"ogSiteName":670,"ogType":671,"canonicalUrls":1235,"schema":1236},"GitLab is now an Amazon Linux 2 Service Ready Partner","Being an Amazon Linux 2 Service Ready partner shows GitLab's strong commitment to AWS linux distributions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682451/Blog/Hero%20Images/isis-franca-hsPFuudRg5I-unsplash.jpg","https://about.gitlab.com/blog/amazon-linux-2-service-ready-partner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is now an Amazon Linux 2 Service Ready Partner\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-09-21\",\n      }",{"title":1232,"description":1233,"authors":1238,"heroImage":1234,"date":1240,"body":1241,"category":791,"tags":1242},[1239],"Darwin Sanoy","2022-09-21","\n\nSeveral months ago, we shared that GitLab started officially supporting Amazon Linux 2 as well as providing packages for GitLab and GitLab Runner for x86 and Graviton ARM 
architectures.\n\nGitLab’s hard working Enablement Engineering team has taken this commitment to the next level by acquiring Amazon’s Service Ready Partner designation for Amazon Linux 2.\n\nThe AWS Service Ready program requires that GitLab provide specific evidence in regard to support, compatibility testing and security testing in order to deploy GitLab on Amazon Linux 2 with confidence.\n\nHere is GitLab’s [Amazon Linux 2 Service Ready Partner listing](https://aws.amazon.com/amazon-linux-2/partners/?partner-solutions-cards.sort-by=item.additionalFields.partnerNameLower&partner-solutions-cards.sort-order=asc&awsf.partner-solutions-filter-partner-type=*all&partner-solutions-cards.q=GitLab&partner-solutions-cards.q_operator=AND).\n\n## Amazon Linux 2 support in GitLab 15.0\n\nAmazon Linux 2 is supported in GitLab 15.0 and later. An [earlier blog](/blog/amazon-linux-2-support-and-distro-specific-packages/) discusses a variety of important points and provides some code in order to plan a smooth transition.\n\nThe Service Ready Designation has been received for version 15.3, but there were no changes made to the process from 15.0 to support the designation.\n\nGitLab Runner has had ARM64 binaries since 12.6.0 and now has Amazon Linux 2 RPM packages for those wanting package-based installs.\n\n## Inside the distribution team process for distribution support\n\nIt would be easy to think that adding support for additional Linux distros is a simple and easy process - but there is actually a lot of effort that goes into it. GitLab’s Distribution Team uses GitLab itself to apply full DevOps disciplines to the continuous building, testing and distribution of packaging for Amazon Linux. 
Here are just some of the steps in preparing a GitLab release for packaging:\n\n- Create an elastic scaling distro-specific CI build environment.\n- Create a distro-specific CI test environment.\n- 2380 compatibility tests are performed on the GitLab code base.\n- SAST and dependency security scanning are completed and a specific escalation procedure is applied for any vulnerabilities that are found.\n- Primary distributions such as distro specific .deb and .rpm packages are prepared specifically for each distribution.\n- Secondary distributions are done as well - this is when the official GitLab AMI is created.\n- CI builds and testing generally happen multiple times a week for Amazon Linux.\n\n![Amazon Linux 2 Test Results](https://about.gitlab.com/images/blogimages/2022-09-amazonlinux/al2testsubgroups.png)\n\n![Amazon Linux 2 Test Results](https://about.gitlab.com/images/blogimages/2022-09-amazonlinux/al2tests.png)\n\n## Need-to-know takeaways\n\n- GitLab is now an official Amazon Linux 2 Service Ready Partner.\n- Amazon Linux 2 RPM packages are available for GitLab from version 15.0 and for GitLab Runner.\n\n> **Note**\n> This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc.\n\n![AWS Partner Logo](https://about.gitlab.com/images/blogimages/2022-09-amazonlinux/amazonlinuxandgravitonready.png){: .right}\n",[1243,282,9],"AWS",{"slug":1245,"featured":6,"template":686},"amazon-linux-2-service-ready-partner","content:en-us:blog:amazon-linux-2-service-ready-partner.yml","Amazon Linux 2 Service Ready Partner","en-us/blog/amazon-linux-2-service-ready-partner.yml","en-us/blog/amazon-linux-2-service-ready-partner",{"_path":1251,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1252,"content":1258,"config":1264,"_id":1266,"_type":14,"title":1267,"_source":16,"_file":1268,"_stem":1269,"_extension":19},"/en-us/blog/android-cicd-with-gitlab",{"title":1253,"description":1254,"ogTitle":1253,"ogDescription":1254,"noIndex":6,"ogImage":1255,"ogUrl":1256,"ogSiteName":670,"ogType":671,"canonicalUrls":1256,"schema":1257},"Tutorial: Android CI/CD with GitLab","Learn how to create an automated Android CI/CD pipeline using GitLab and fastlane.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669316/Blog/Hero%20Images/angela-compagnone-4Iyg6cNU7sI-unsplash.jpg","https://about.gitlab.com/blog/android-cicd-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Android CI/CD with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2023-06-13\",\n      }",{"title":1253,"description":1254,"authors":1259,"heroImage":1255,"date":1261,"body":1262,"category":791,"tags":1263},[1260],"Darby Frey","2023-06-13","\n\nMention the word keystore and all Android developers in a 5km radius will suddenly have a small feeling of panic. 
Attempting to automate a [CI/CD](https://docs.gitlab.com/ee/ci/) pipeline to deploy an app can be frustrating, and configuring Google Play access and code signing is at the heart of the problem.\n\nBut fear not! GitLab Mobile DevOps is here to make this process easier and faster, and I am here to guide you.\n\n[GitLab Mobile DevOps](https://docs.gitlab.com/ee/ci/mobile_devops.html) is a collection of features built right into GitLab to solve the biggest challenges mobile teams face in establishing a DevOps practice.\n\nIn this blog post, I’ll demonstrate how to set up an automated CI/CD pipeline using GitLab and [fastlane](https://fastlane.tools/).\n\n## Prerequisites \nTo get started, there are a few prerequisites you’ll need:\n\n* A Google Play developer account - [https://play.google.com/console](https://play.google.com/console)\n* Ruby and Android Studio installed on your local machine [https://docs.fastlane.tools/getting-started/android/setup/](https://docs.fastlane.tools/getting-started/android/setup/)\n\n> Try your hand at the [iOS CI/CD for GitLab tutorial](https://about.gitlab.com/blog/ios-cicd-with-gitlab/)\n\n## Reference project\nFor this tutorial, we’ll use the Android demo project for reference: [https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo).\n\n## Install fastlane\nIf you haven’t done so yet, the first step will be to install fastlane. Do this by creating a file in the root of your project called `Gemfile`. Give it the following contents:\n\n```ruby\nsource \"https://rubygems.org\"\n\ngem \"fastlane\"\n```\n\nThen, from the terminal in your project, run:\n\n```\nbundle install\n```\n\nThis command will install fastlane, and all of its related dependencies.\n\n## Initialize fastlane\nNow that fastlane is installed, we can set it up for our project. Run the following command from the terminal in your project. 
You’ll be asked to enter your package name, so enter that. When prompted for the JSON secret file, you can skip that for now, and you can answer \"no\" to the questions about metadata management.\n\n```\nbundle exec fastlane init\n```\n\n![Initialize fastlane](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/fastlane-init.png)\n\nRunning this command will create a new folder in your project called `fastlane`. This folder will contain two files `Appfile` and `Fastfile`.\n\nThe Appfile contains the configuration information for the app, and the Fastfile has some sample code that we will replace later. See the fastlane docs for more information about the configuration details in the Appfile: [https://docs.fastlane.tools/advanced/Appfile/](https://docs.fastlane.tools/advanced/Appfile/).\n\n## Code signing\nNext are the steps for code signing.\n\n### Create a keystore\nThe next step is to create a keystore and properties files for code signing. Run the following command to generate a keystore in the project root called `release-keystore.jks`:\n\n```\nkeytool -genkey -v -keystore release-keystore.jks -storepass password -alias release -keypass password -keyalg RSA -keysize 2048 -validity 10000\n```\n\n![Create a keystore](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/keytool-genkey.png)\n\nMore information is available in the [keytool docs](https://download.java.net/java/early_access/loom/docs/specs/man/keytool.html).\n\nThe next step is to create a properties file to be used by [Gradle](https://gradle.org/). Create a file in the project root called `release-keystore.properties`, with the following contents:\n\n```\nstoreFile=../release-keystore.jks\nkeyAlias=release\nkeyPassword=password\nstorePassword=password\n```\n\nAlso, be sure to add both files to your `.gitignore` file so they aren't committed to version control.\n\n### Configure Gradle\nNext, configure Gradle to use the newly created keystore. 
In the `app/build.gradle` file, add the following:\n\n**1.** Right after the plugins section, add:\n\n```\ndef keystoreProperties = new Properties()\ndef keystorePropertiesFile = rootProject.file('release-keystore.properties')\nif (keystorePropertiesFile.exists()) {\n    keystoreProperties.load(new FileInputStream(keystorePropertiesFile))\n}\n```\n\n**2.** Before Build Types, add:\n\n```\nsigningConfigs {\n    release {\n   \t keyAlias keystoreProperties['keyAlias']\n   \t keyPassword keystoreProperties['keyPassword']\n   \t storeFile keystoreProperties['storeFile'] ? file(keystoreProperties['storeFile']) : null\n   \t storePassword keystoreProperties['storePassword']\n    }\n}\n```\n\n**3.** Lastly, add the signingConfig to the release build type:\n\n```\nsigningConfig signingConfigs.release\n```\n\n## Upload keystore to GitLab secure files\nNext, upload your keystore files to GitLab so they can be used in CI/CD jobs. \n\n1. On the top bar, select **Menu > Projects** and find your project.\n1. On the left sidebar, select **Settings > CI/CD**.\n1. In the Secure Files section, select **Expand**.\n1. Select **Upload File**.\n1. Find the file to upload, select **Open**, and the file upload begins immediately. 
The file shows up in the list when the upload is complete.\n\nDo this for both the `release-keystore.jks` file and the `release-keystore.properties` file.\n\n![Upload Secure File](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/upload-secure-file.png)\n\n![List Secure Files](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/list-secure-files.png)\n\n## Create a CI/CD pipeline\n\nWith the configuration in place, now copy the contents of the .gitlab-ci.yml and fastlane/Fastfile below to the project.\n\nThis [.gitlab-ci.yml](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo/-/blob/main/.gitlab-ci.yml) has all the configuration needed to run the test, build, and beta jobs.\nThe [fastlane/Fastfile](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo/-/blob/main/fastlane/Fastfile) is an example that can be customized to specific project settings.\n\nNote: This fastlane configuration uses plugins. See the [docs](https://docs.fastlane.tools/plugins/using-plugins/) for instructions on how to configure your project for fastlane plugins.\n\n## Create an app in the Google Play Console\nNext, generate a build of your app locally and upload it to seed a new app entry in the Google Play Console. Run the following command locally:\n\n```\nbundle exec fastlane build\n```\n\nThis command will create a signed build of the app at\n\n```\nbuild/outputs/bundle/release/app-release.aab\n```\n\nWith the signed build ready to go, log in to the [Google Play Console](https://play.google.com/console) and create a new app and seed it with the initial build.\n\n## Configure Google Play integration\nThe last thing to set up is the Google Play integration in GitLab. 
To do so, first, create a Google service account.\n\n### Create a Google service account\nFollow the [instructions](https://docs.fastlane.tools/actions/supply/#setup) for setting up a service account in Google Cloud Platform and granting that account access to the project in Google Play.\n\n### Enable Google Play integration\nFollow the [instructions](https://docs.gitlab.com/ee/user/project/integrations/google_play.html) for configuring the Google Play integration by providing a package name and the JSON key file just generated for the service account.\n\nThis is a simplified CI/CD configuration that created three CI/CD jobs to run each of the lanes in fastlane on the GitLab Runners. The test and build jobs will run for all CI/CD pipelines, and the beta job will only be run on CI/CD pipelines on the main branch. The beta job is manually triggered, so you can control when the beta release is pushed to Google Play. \n\nWith these configurations in place, commit all of these changes and push them up to your project. 
The CI/CD pipeline will kick off, and you can see these jobs in action.\n",[9,109,978],{"slug":1265,"featured":6,"template":686},"android-cicd-with-gitlab","content:en-us:blog:android-cicd-with-gitlab.yml","Android Cicd With Gitlab","en-us/blog/android-cicd-with-gitlab.yml","en-us/blog/android-cicd-with-gitlab",{"_path":1271,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1272,"content":1277,"config":1282,"_id":1284,"_type":14,"title":1285,"_source":16,"_file":1286,"_stem":1287,"_extension":19},"/en-us/blog/application-modernization-best-practices",{"title":1273,"description":1274,"ogTitle":1273,"ogDescription":1274,"noIndex":6,"ogImage":1193,"ogUrl":1275,"ogSiteName":670,"ogType":671,"canonicalUrls":1275,"schema":1276},"7 Best practices for application modernization","Use these best practices to avoid common pitfalls on the application modernization journey.","https://about.gitlab.com/blog/application-modernization-best-practices","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"7 Best practices for application modernization\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-03-27\",\n      }",{"title":1273,"description":1274,"authors":1278,"heroImage":1193,"date":1279,"body":1280,"category":791,"tags":1281},[788],"2019-03-27","\n\nA journey starts with a single step, any motivational poster can you tell you that, but what about all the steps after?\nEven if you know where you're going, are you getting there in the most efficient way possible?\nBefore you start an application modernization quest of your own, it helps to get an idea of the road ahead.\n\nYou don't have to have everything mapped out from the start, and chances are high your plans will change.\nEnterprises can learn a lot from [teams that modernized their legacy systems successfully](/blog/application-modernization-examples/), but there are also 
valuable lessons from those that failed.\n\n## Why legacy modernization projects fail\n\nEnterprises that dive into the application modernization process are trying to solve big problems, but great intentions rarely guarantee success.\nIn 1999, Carnegie Mellon researchers dove into [10 reasons why legacy re-engineering efforts fail](https://www.cs.cmu.edu/~aldrich/courses/654-sp05/readings/Bergey99.pdf) that are still very relevant today:\n\n1. The organization adopts a flawed strategy from the start.\n2. The organization relies too heavily on outside consultants.\n3. The team is tied down to old technologies and inadequate training.\n4. The organization thinks it has its legacy system under control (it doesn't).\n5. The needs of the organization are oversimplified.\n6. The overall software architecture isn't given enough consideration.\n7. There is no defined application modernization process.\n8. Inadequate planning and follow through.\n9. Lack of long-term commitment to the strategy.\n10. Leaders pre-determine technical decisions.\n\nEvery team faces legacy modernization challenges.\nCommitting to the process is the first step to meeting those challenges head on.\nAs teams go through the modernization journey, use these best practices to avoid common pitfalls and ensure long-term success.\n\n### 1. Create a modernization team\n\nGroups can learn a lot from each other and a variety of voices at the table can point out weaknesses and improve the modernization process.\nWhen choosing a team or developing an innovation group, avoid thinking along legacy lines which divide teams by stages of the software lifecycle.\n[Think about building a cross-functional team of 8–12 people](/blog/beyond-application-modernization-trends/) who can focus on developing the culture, process, and tools needed to continuously deliver software.\n\n### 2. 
Disagree, commit, and disagree\n\nWith more voices come more opinions.\nIt's a powerful way to innovate and generate great ideas, but it's also the most effective way to be ineffective.\nDecisions sometimes have to be made without a 100 percent buy-in.\nEverything can be questioned but as long as a decision is in place, we should expect people to commit to executing it.\n\"Disagree and commit\" is [one of GitLab's core values](https://handbook.gitlab.com/handbook/values/#disagree-commit-and-disagree) and it's a common business principle that keeps projects moving forward.\n\nWhether decisions are left to one individual or distributed will largely depend on the size of the organization.\nFor all final decision-makers during the application modernization process: listen to other points of view, thank those who contribute ideas and feedback, consider options carefully, and commit to a course of action.\n\n### 3. Map the development workflow\n\nMany organizations have been bogged down by the sheer number of tools, plug-ins, and platforms they use to accommodate everyday tasks.\nSome workflows have more in common with a Rube Goldberg device than a logical order of operation, but mapping out the development workflow is a necessity when undertaking a legacy modernization project.\nThis step is usually when the headaches of toolchain complexity come to light.\n\nLook at every tool being used across teams and identify dependencies.\nMore handoffs present more opportunities for single points of failure, and any new applications added to the mix need to be able to play well with others.\nEven if you don't mind teams finding their own solutions so that they can work creatively, it's a good rule of thumb to [identify all privately used tools](https://www.pluralsight.com/blog/career/shadow-it-security-threat) that might be in the mix.\n\n### 4. Set small modernization goals\n\nHaving an entire timeline mapped out months in advance sets you up for failure.\nWhy? 
Projects inevitably change once they get started.\nTrying to map moving targets months in advance is an exercise in futility that ends in projects that are rushed, incomplete, or late anyway. Reducing the cycle time and focusing instead on iterating towards smaller goals will have a much higher likelihood of success.\nTeams that master iteration respond to feedback faster, adapt more quickly, and complete their projects faster than their large-scale counterparts.\nBy shortening the timeframe and reducing the scope of each goal, you're able to respond to changing needs, adjust your long-term plans with the feedback you receive along the way, and radically reduce engineering risk.\n\nWhen planning for major milestones (when certain tools will be retired or migrated, when updates will occur, team training, etc), focus instead on the many small steps between them.\nA smaller deploy introduces fewer changes that can potentially introduce issues, ensuring that each step moves smoothly.\n\n### 5. Prioritize legacy data\n\nPreventing data loss is a key priority during the modernization process.\nEvaluate the data being processed, moved, and stored and put it into categories.\nWhether it's \"high, moderate, or low\" or \"green, yellow, and red,\" make sure the team understands each data category and what safeguards to have in place for each.\n\n### 6. Don't modernize bad habits\n\nMany organizations have squandered a clean slate by infusing new tools with old habits.\nTake a close look at your development workflow and identify instances of duplicated data, manual tasks, inefficiencies, and other habits that could derail your application modernization process in its tracks.\nMany of these practices are due to a lack of training or documentation – both easily fixable problems.\nA new tool doesn't solve bad habits, but bad habits can derail new tools.\n\n### 7. 
Close the skill gap\n\nThe number of programming languages, tools, systems, and methodologies that developers have to know is immense.\nIt's a challenge for teams to develop the knowledge they need to work quickly, and adding a new system to the mix should be considered carefully.\nKeeping teams in the loop on changes and then dedicating resources to make sure they understand how to navigate the new workflows will be the _most important part of the application modernization process_.\nMake this an ongoing, long-term commitment to organizational success and continue to document best practices long after legacy systems are turned off. Tools are only as good as the people who use them.\n\nAre you ready to tackle application modernization? [Just commit.](/blog/application-modernization-best-practices/)\n",[976,9],{"slug":1283,"featured":6,"template":686},"application-modernization-best-practices","content:en-us:blog:application-modernization-best-practices.yml","Application Modernization Best Practices","en-us/blog/application-modernization-best-practices.yml","en-us/blog/application-modernization-best-practices",{"_path":1289,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1290,"content":1295,"config":1300,"_id":1302,"_type":14,"title":1303,"_source":16,"_file":1304,"_stem":1305,"_extension":19},"/en-us/blog/application-modernization-examples",{"title":1291,"description":1292,"ogTitle":1291,"ogDescription":1292,"noIndex":6,"ogImage":1193,"ogUrl":1293,"ogSiteName":670,"ogType":671,"canonicalUrls":1293,"schema":1294},"Examples of legacy modernisation projects","Discover how four teams committed to the application modernization process.","https://about.gitlab.com/blog/application-modernization-examples","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Examples of legacy modernisation projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        
\"datePublished\": \"2019-03-14\",\n      }",{"title":1291,"description":1292,"authors":1296,"heroImage":1193,"date":1297,"body":1298,"category":791,"tags":1299},[788],"2019-03-14","\n\nFine wine and cheese. Whiskey. Paul Rudd. There are a lot of things that get better with age – legacy systems are _not_ one of them.\n\n## The true cost of legacy systems\n\nOver time, the true cost of legacy systems is enormous: from additional resources needed to maintain them, to lost productivity, they can hinder investments in long-term growth. In highly regulated industries, they can even be a financial liability.\n[Health Insurance Portability and Accountability Act (HIPAA) violations in 2018 resulted in over $28 million in fines](https://compliancy-group.com/hipaa-fines-directory-year/), many of them due to data breaches.\nAs legacy systems grow older, it's [easy to miss critical security patches (if any are even available)](https://www.globalscape.com/blog/how-high-risk-legacy-systems-are-hurting-your-business), making your system more vulnerable to malicious actors ready to use old Java and SSL exploits to expose your network.\n\nEven if we can all agree that legacy system modernization is important, it still takes work.\n[Analysis paralysis is a real phenomenon in the digital transformation journey](/blog/beyond-application-modernization-trends/).\nRipping off the band-aid and committing to faster deployment feels overwhelming, and there are so many application modernization trends to consider. 
But not taking action puts a ceiling on growth.\n\n## Status quo \u003C Innovation\n\nMany large enterprises feel tied down to current practices because there just aren't enough resources left to innovate once legacy systems are maintained.\nFor example, [the greater part of the IT-related federal budget of the United States ($80 billion) goes to maintaining legacy systems.](https://www.spiria.com/en/blog/method-and-best-practices/cost-legacy-systems/)\nWhen large companies can only devote 20 percent of their budget to software modernization, things move even more slowly.\nObsolete systems create a vicious cycle where enterprises feel they have to choose between innovation or keeping things running.\n\nInstead of focusing on a full rip-and-replace of legacy systems, an application modernization strategy that identifies specific challenges reduces potential disruptions.\nMaking goals and achieving them one step at a time can make a big impact.\n\n## How to modernize applications\n\nThese examples of legacy application modernization show how four teams identified challenges, set manageable goals, and decided to [#JustCommit](https://twitter.com/search?q=just+commit) to development efficiency.\n\n### 1. Leveraging microservices\n\nWith a monolithic architecture, everything is developed, deployed, and scaled together.\nWith microservices, each component is broken out and deployed individually as services and the services communicate with each other via API calls.\n[Leveraging microservices allows teams to deploy faster and achieve scale, all at a lower cost](/topics/microservices/).\nAsk Media Group recently participated in a webcast where they discussed their transition from monoliths to microservices leveraging containers, Kubernetes, and AWS.\n\n[Watch the webcast](/webcast/cloud-native-transformation/)\n{: .alert .alert-gitlab-purple}\n\n### 2. 
Improving automation\n\nEquinix, a leading global data center company with over 180+ colocation facilities across five continents, wanted a solution that would help developers code better and faster, to bring customers new features quickly.\nWhile their old system was fine in the beginning, they needed a more robust solution that could meet their enterprise control and scaling needs. See how Equinix increased the agility of their developers, without sacrificing quality, through automation.\n\n{: .alert .alert-gitlab-purple}\n\n### 3. Simplifying the toolchain\n\nGoldman Sachs, one of the largest financial institutions in the world with over $1.5 trillion in assets, had some challenges in their technology division.\nAs a critical center of the financial provider's business, speed is essential, but a complex toolchain with too many parts was slowing them down.\nIn order to have faster deployment cycles and increase concurrent development, they knew they needed to simplify their toolchain. One cohesive environment helped them improve visibility and efficiency.\n\n[Read the case study](/customers/goldman-sachs/)\n{: .alert .alert-gitlab-purple}\n\n### 4. Reducing lifecycles\n\nChris Hill, Head of Systems Engineering for Infotainment at Jaguar Land Rover, shared his team's journey from feedback loops of 4-6 weeks to _just 30 minutes_ at the DevOps Enterprise Summit London in 2018.\nWho says you need to be stuck with a traditional release cadence?\n\n[Watch the presentation](/blog/chris-hill-devops-enterprise-summit-talk/)\n{: .alert .alert-gitlab-purple}\n\nAre you ready to tackle application modernization? 
[Just commit.](/blog/application-modernization-best-practices/)\n",[109,9,1041],{"slug":1301,"featured":6,"template":686},"application-modernization-examples","content:en-us:blog:application-modernization-examples.yml","Application Modernization Examples","en-us/blog/application-modernization-examples.yml","en-us/blog/application-modernization-examples",{"_path":1307,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1308,"content":1314,"config":1320,"_id":1322,"_type":14,"title":1323,"_source":16,"_file":1324,"_stem":1325,"_extension":19},"/en-us/blog/arm-open-source-makes-a-seamless-migration-to-gitlab",{"title":1309,"description":1310,"ogTitle":1309,"ogDescription":1310,"noIndex":6,"ogImage":1311,"ogUrl":1312,"ogSiteName":670,"ogType":671,"canonicalUrls":1312,"schema":1313},"Arm Open Source makes a seamless migration to GitLab","DevOps platform switch reaps cost savings of up to 20%.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670681/Blog/Hero%20Images/a-creative-agencys-gitlab-wishlist.jpg","https://about.gitlab.com/blog/arm-open-source-makes-a-seamless-migration-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Arm Open Source makes a seamless migration to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-10-03\",\n      }",{"title":1309,"description":1310,"authors":1315,"heroImage":1311,"date":1316,"body":1317,"category":1318,"tags":1319},[745],"2022-10-03","[Arm](https://www.arm.com/) wanted to modernize its infrastructure to span across internal (private) and open-source collaborative repositories, and, in the process, consolidate some of its key projects on the same underlying infrastructure. Arm selected GitLab as its new platform. \n\nArm builds software that acts as enablement pieces that can be integrated with other software on its architecture. 
These are foundational pieces of software that often underpin commercial software offerings, from operating systems to middleware applications. Over 99% of mobile devices have Arm-based processors and the software from the Open Source Engineering team powers computers from sensors up to the cloud.\n\n“The magic really happens when you join enablement pieces with other bits of software from other communities and other projects,” explains Andrew Wafaa, distinguished engineer and senior director of software communities at Arm.\n\nThe goal is to give software developers the best of the Arm architecture, he adds. The enablement pieces “leverage a lot of the bells and whistles from the Arm architecture and that allows people to take those and integrate them with other stacks.”\n\n## GitLab open source lets Arm use its own tooling\n\nArm had a mix of stand-alone Git servers internally and public web-based Git service and wanted to consolidate to a single solution for the company’s larger projects. However,  most of the new core infrastructure that Arm is deploying is on native Arm-based hardware, and the Git service is a proprietary solution. \n\nArm would have to work with its previous platform provider to ensure correctness. According to Wafaa, “We'd have to do reviews, and the patch review process is challenging because it's all private and proprietary code, which was a big factor for us in choosing GitLab.” In addition, Arm had concerns about the code ownership of their OSS projects hosted on the external service. Therefore, Arm determined an open source solution like GitLab would be the best option to maximize choice, be cost effective, and minimize vendor lock-in. Moving to GitLab’s self-hosted platform supported effective collaboration and enabled Arm’s software to be hosted on Arm technology.  \n\nAnother large bonus is that because [GitLab is open source](/solutions/open-source/), Arm can use its own tools to support its open source ecosystem. 
“Using an open source product made sense at the end of the day,’’ Wafaa says. “Another big factor was that GitLab is an enterprise-grade product that provides very similar workflows to what Arm was already using. It was very easy to move from our previous platform to GitLab; the terminology is very similar, as well as the look and feel.”\n\nFurther, GitLab is a self-hosted enterprise product, and it was important to Arm to have good customer support in the event that something goes wrong.\n\nArm hosts about 200 external open source projects, so of course cost was also a consideration, Wafaa says. “When we're looking at future growth plans there needs to be a reasonable amount of savings and GitLab made it appealing cost-wise.” \n\n## Maintaining control every step along the way\n\nArm is in the process of moving internal workloads to the Arm architecture. Although GitLab didn't initially support Arm, the company “was quite happy to work with us and our engineering teams to ensure that it did support Arm” by creating integrations with its infrastructure, Wafaa says.\n\n“The fact that we could have that fine-grained access control was a huge benefit to us and being able to replicate it on AWS Graviton EC2 instances globally gave us that full redundancy and disaster recovery requirements to meet our IT's needs,\" Wafaa says.\n\nBecause Arm is an IP company, security is paramount. Wafaa says they opted for a gradual migration before scaling out. “For us to deploy, we have to go through a number of approvals with various security teams internally, and that went fairly smoothly. It just worked.”\n\nAfter a “mini deployment,” everything is working seamlessly, he says. Now, anyone can run GitLab on Arm from an enterprise perspective.\n\nThen Wafaa and others held their collective breath awaiting feedback. “Our engineering teams can be quite demanding of the infrastructure provided. 
They are very, very particular.”\n\nSince the teams have been migrated onto GitLab, “they have been full of praise,” which was a pleasant surprise for Dean Arnold, Arm’s DevOps lead for the open source engineering org, Wafaa says, “because he's not used to getting praise from them. It stood up and worked really well for them.”\n\nMigration to GitLab is ongoing with about 90 percent of it complete. “Certain projects are taking longer because they have complex tooling and the integration pieces are still being ironed out,” Wafaa says.\n\nWith the adoption of GitLab, Arm’s Open Source Engineering teams can now offer full end-to-end native development, and can confidently say “software development by Arm, for Arm, on Arm”. GitLab is not just a DevOps tool, it is a tool that helps companies like Arm offer a complete developer experience.\n\n## Solid metrics for Arm\n\nWith GitLab, Arm has found a number of benefits:\n- Ease of CI/CD set up and integration\n- Cost savings of between 15% and 20%\n- Time savings of an average two to four people a month on admin work\n- Tool simplification\n- The ability to share and collaborate on pipelines/code\n- Quick setup of new projects and onboarding of teams\n\nPreviously, there were multiple individual components that would have to be then stitched together, Wafaa says. “GitLab actually offers us more features and more functionality than we're necessarily used to, and that’s great.”\n\nThat’s especially useful because other contributors want to use pretty much every feature possible for their projects, both for corporate and personal use. 
For example, one engineer uses GitLab in a personal capacity and wants full CI capabilities.\n\nBoth Wafaa and Arnold are confident that once the migration is completed, there will be significant time savings and projects will be onboarded quickly.\n\n## Deployment in the clouds\n\nOn tap now is working through how to share parts of the pipelines so that teams can adopt things quicker, Arnold says. By the time the migration is completed, Arm will have most of what contributors need, he says.\n\nRight now, Arm is using AWS EC2 instances. Looking ahead, Arnold envisions that [deployment between cloud providers](/topics/multicloud/) will become more seamless without having to change underlying code.\n\nSays Wafaa, “Once we've got people fully onto GitLab, then we'll look at how we can expand it and perhaps provide a more robust level of redundancy across geographies via the containerized route. This is an area of ongoing collaboration between Arm and GitLab, and we hope to be able to deploy soon.”","open-source",[793,682,9],{"slug":1321,"featured":6,"template":686},"arm-open-source-makes-a-seamless-migration-to-gitlab","content:en-us:blog:arm-open-source-makes-a-seamless-migration-to-gitlab.yml","Arm Open Source Makes A Seamless Migration To Gitlab","en-us/blog/arm-open-source-makes-a-seamless-migration-to-gitlab.yml","en-us/blog/arm-open-source-makes-a-seamless-migration-to-gitlab",{"_path":1327,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1328,"content":1334,"config":1340,"_id":1342,"_type":14,"title":1343,"_source":16,"_file":1344,"_stem":1345,"_extension":19},"/en-us/blog/auto-devops-explained",{"title":1329,"description":1330,"ogTitle":1329,"ogDescription":1330,"noIndex":6,"ogImage":1331,"ogUrl":1332,"ogSiteName":670,"ogType":671,"canonicalUrls":1332,"schema":1333},"Auto DevOps 101: How we’re making CI/CD easier","VP of product strategy Mark Pundsack shares everything you need to know about Auto 
DevOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666915/Blog/Hero%20Images/autodevops.jpg","https://about.gitlab.com/blog/auto-devops-explained","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Auto DevOps 101: How we’re making CI/CD easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-10-07\",\n      }",{"title":1329,"description":1330,"authors":1335,"heroImage":1331,"date":1336,"body":1337,"category":679,"tags":1338},[851],"2019-10-07","\nContinuous integration and continuous delivery (CI/CD) are the gold standards of software development but they can be challenging to achieve. GitLab’s [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) feature is designed to make the CI/CD process much easier with baked-in best practices and automation that will move code seamlessly through the entire development lifecycle. [Mark Pundsack](/company/team/#markpundsack), VP of product strategy, demonstrated how straightforward – and customizable – Auto DevOps is during our company-wide meeting, [Contribute 2019](/blog/how-we-scaled-our-summits/). Here’s what you need to know to get started with [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/).\n\n## It’s a shift... left\n\n“Auto DevOps is a [CI/CD pipeline](/topics/ci-cd/) that we have defined for you,” Mark says. “It’s basically all these best practices that we want to encourage everybody to have, and we believe are a good baseline for software development.” The goal is to have everyone set up to do CI/CD, but not just the bare minimum CI/CD, he says. “Like most people when they create a project, they start with running tests. That's the natural thing for CI. 
And then maybe they'll even get into CD, but they're not going to do things like [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) analysis and security analysis. And we really believe in the [shift left movement](/blog/secure-containers-devops/). If you look at everything as a pipeline, we want to take security and things like that which are stuck at the end and we want to move them as far left as possible. We believe you should be checking for security even on your first deploy. So we said, okay, let's put all that in there and make a script that says this is everything that you should be doing, so let's just do it for you.”\n\nThe roots of Auto DevOps can be found in previous versions of GitLab which offered Auto Deploy. “We evolved [Auto DevOps] as the company evolved to have more and more capabilities around the DevOps lifecycle,” Mark explains. Today, Auto DevOps tackles 12 software development steps automatically. Customers wanting more flexibility can choose the [Composable Auto DevOps](/releases/2019/04/22/gitlab-11-10-released/#composable-auto-devops) option, where the template can easily be modified to suit the requirements.\n\n## The Auto DevOps process\n\nAuto DevOps begins with language detection using [Heroku buildpacks](https://devcenter.heroku.com/articles/buildpacks). While not all languages are supported, a build is created and tested automatically for the supported languages, Mark explains. Auto DevOps uses the open source version of [Code Climate](https://codeclimate.com/oss) to do code quality analysis and the results are displayed in the merge request when a change is made. After that, it’s time for security testing; including dependency scanning, license management, and container scanning. “All those things kick off again right from your first deploy,” Mark says. “We’re really taking shifting left seriously there.”\n\nAt this point developers are able to auto review applications. 
And once that review app is available Auto DevOps will kick off [dynamic application security testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/). “It tries to detect security vulnerabilities in your running application,” Mark says. Finally Auto DevOps will auto deploy to either staging or production depending on how it's configured. “From the first push it just automatically does all this stuff all the way – from deployment to production – which is pretty great.”\n\nAn app in production will get automatic browser performance testing which both challenges the application and records the results. [Auto monitoring](https://docs.gitlab.com/ee/topics/autodevops/#auto-monitoring) is also running so users can easily track response times, error rates, and even things like CPU and memory utilization. “All of this happens without any configuration whatsoever and that's really, that's why we put ‘auto’ in front of all of these,” Mark says. “It's really almost all the capabilities of our [DevOps lifecycle](/stages-devops-lifecycle/) thrown in by default.”\n\nWatch Mark demonstrate exactly how Auto DevOps works in the video below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pPRF1HEtQ3s\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [Joshua Sortino](https://unsplash.com/@sortino) on [Unsplash](https://unsplash.com)\n{: .note}\n",[109,9,1158,1339],"production",{"slug":1341,"featured":6,"template":686},"auto-devops-explained","content:en-us:blog:auto-devops-explained.yml","Auto Devops 
Explained","en-us/blog/auto-devops-explained.yml","en-us/blog/auto-devops-explained",{"_path":1347,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1348,"content":1354,"config":1361,"_id":1363,"_type":14,"title":1364,"_source":16,"_file":1365,"_stem":1366,"_extension":19},"/en-us/blog/auto-devops-where-we-are-and-where-we-are-headed",{"title":1349,"description":1350,"ogTitle":1349,"ogDescription":1350,"noIndex":6,"ogImage":1351,"ogUrl":1352,"ogSiteName":670,"ogType":671,"canonicalUrls":1352,"schema":1353},"Auto DevOps: Where we are and where we are headed","As we will soon start an Auto DevOps design sprint, this article aims to summarize our current knowledge about Auto DevOps, and sets the stage for future discussions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679516/Blog/Hero%20Images/gitlab-11-0-released-cover.jpg","https://about.gitlab.com/blog/auto-devops-where-we-are-and-where-we-are-headed","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Auto DevOps: Where we are and where we are headed\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-05-05\",\n      }",{"title":1349,"description":1350,"authors":1355,"heroImage":1351,"date":1357,"body":1358,"category":1359,"tags":1360},[1356],"Viktor Nagy","2021-05-05","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nAuto DevOps is an advanced GitLab feature-set that leverages GitLab's [single application](https://about.gitlab.com/handbook/product/single-application/) to assist users in every phase of the development and delivery process, implementing automatic tasks that can be customized and refined to get the best fit for their needs. 
The Three Ways of DevOps put forward a rigorous flow of work from development to production, short and ample feedback loops to fix quality issues early and avoid rework, and a culture of learning by balanced experimentation and repetition. GitLab, as a technological solution, can help with the first two ways, and can make the cultural changes less heavy and costly. Auto DevOps is our solution to support DevOps best practices with a convention over configuration approach.\n\n## A Bit of History\n\nAuto DevOps was released in GitLab 11.0, on 22. June 2018. From the very beginning, it included License Management, Security tests, and support for Kubernetes deployments. In the past years, the Auto DevOps offering was extended in many ways, and today [it ties together 15 stages](https://docs.gitlab.com/ee/topics/autodevops/stages.html) from building a project to deploying it. Besides being \"Auto\" as in automatic, it supports [a vast array of customization possibilities](https://docs.gitlab.com/ee/topics/autodevops/customize.html) too. \n\nThroughout these years, the goal of Auto DevOps remained the same: to simplify DevOps best practices adoption at every organisation.\n\n## Feedback we have heard\n\nAfter running a dozen interviews with our customers, I have noticed a few emerging patterns that I would like to share with you:\n\n### It's highly valued\n\nFirst of all, Auto DevOps is highly valued by our users. I talked with customers who were transitioning from legacy infrastructure to Kubernetes, and after 2 years of transitioning are looking forward to start using Auto DevOps with its built-in security scanners and review apps support. We have a great market to serve, and this is an amazing position to be at!\n\n### Auto DevOps utilized as exemplar templates\n\nFor various reasons, many customers find Auto DevOps to be unsuitable as-is. 
In these cases, it's considered as a set of GitLab CI templates that platform engineers can look at and learn from as they build out their own Auto DevOps forks. While we think it's great that these customers have found value in our Auto DevOps templates, we'd much rather create a solution that fulfills their needs without them having to write and maintain these templates forever.\n\n### Auto DevOps is slow\n\nAn often heard problem with Auto DevOps pipelines is that they are slow. Especially, its Auto Testing features, which end up getting switched off for this reason. One of the core principles of DevOps is to have a fast feedback loop; slow pipelines are counter to that principle and are therefore unacceptable. Our solution should accept this as a basic tenet and requirement.\n\n### Auto DevOps is hard to troubleshoot\n\nInherent in its name, it seems to be an automatic solution. While in actuality, it's a rather complex product with many pieces having to fit together just right to get it to work. As a result, if something goes wrong, then our users often turn to GitLab support for assistance. This is especially problematic, as erroneous configurations usually happen when Auto DevOps is tried out for the first time. Leading to a negative first impression. We should be able to provide a better experience by putting more effort into its onboarding flow.\n\n### Auto DevOps does not scale well\n\nMany GitLab users who claim that they use Auto DevOps actually use a forked version of it that incorporates custom CI templates, some Auto DevOps templates, and some custom logic too. In these situations, every new project created requires the redundant setup of these custom templates, because simply enabling Auto DevOps would only use the GitLab templates. This can be a problem in larger organizations, with dedicated platform teams, because they often have a requirement for standardization to simplify their engineering team's life. 
The current state of Auto DevOps does not serve this need well.\n\n### Auto DevOps targets only Kubernetes\n\nFor a long time, the only supported deployment target for Auto DevOps was Kubernetes. This has changed in the past year with the additional support of AWS EC2 and ECS. Nevertheless, we still do not support application stores for mobile development, simple package creation, Lambda function, etc. On one hand, Kubernetes already restricts Auto DevOps to a special set of companies where there is likely a central platform team, while on the other hand, we don't have support for platform team-level customizations. At the same time, we were missing support for the most common deployment targets without a platform team.\n\n## Who is the target user\n\nThe patterns show that there are two different user types (aka personas) for Auto DevOps:\n\n- There are the bigger companies with standardized processes and a dedicated platform team that owns these processes. This team is responsible for every developer teams' pipelines and their primary job is to allow developers to focus only on developing the business logic, instead of working on deployments and infrastructure. These platform teams require a product that enables them to apply special company policies automatically to every project that works. While Auto DevOps allows many customizations, those customizations are brittle to changes in GitLab, and we don't have special logic in using these customized setups automatically.\n\n- Then there are smaller companies, without a dedicated platform team. In these situations, usually, a lead engineer wears the platform engineer hat and takes care of the pipelines from the initial build through various scanners and non-production deployments to the final production deployment. These teams don't mind following our conventions as long as our solution doesn't require much time from them to get started while also supporting their deployment targets. 
Tools like Heroku, Vercel, or Netlify already paved the way for this approach.\n\n## What's next\n\nIn the past 3 years, we have learned a lot about our users, this allows us to take a new look at Auto DevOps, and see how we can best serve our users' current goals and needs. For this reason, we're planning on running a Design Sprint in the coming month to determine the new direction and come up with a solution that will help teams to more easily adopt DevSecOps best practices. While moving through this journey we would love for you to [join the discussion on the Reimagining Auto DevOps epic](https://gitlab.com/groups/gitlab-org/-/epics/5148)... in the meantime, thank you for reading and I hope you're all as excited as we are to move Auto DevOps back to the future!\n","unfiltered",[9,793],{"slug":1362,"featured":6,"template":686},"auto-devops-where-we-are-and-where-we-are-headed","content:en-us:blog:auto-devops-where-we-are-and-where-we-are-headed.yml","Auto Devops Where We Are And Where We Are Headed","en-us/blog/auto-devops-where-we-are-and-where-we-are-headed.yml","en-us/blog/auto-devops-where-we-are-and-where-we-are-headed",{"_path":1368,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1369,"content":1375,"config":1382,"_id":1384,"_type":14,"title":1385,"_source":16,"_file":1386,"_stem":1387,"_extension":19},"/en-us/blog/automate-to-accelerate-webcast-recap",{"title":1370,"description":1371,"ogTitle":1370,"ogDescription":1371,"noIndex":6,"ogImage":1372,"ogUrl":1373,"ogSiteName":670,"ogType":671,"canonicalUrls":1373,"schema":1374},"Testing & release automation: Accelerate development","If you’re not using automated testing, your competitors almost certainly are – catch up on our recent webcast to get started.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671288/Blog/Hero%20Images/gitlab-live-event.png","https://about.gitlab.com/blog/automate-to-accelerate-webcast-recap","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automate to accelerate: What you need to know about test and release automation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-12-08\",\n      }",{"title":1376,"description":1371,"authors":1377,"heroImage":1372,"date":1379,"body":1380,"category":679,"tags":1381},"Automate to accelerate: What you need to know about test and release automation",[1378],"Rebecca Dodd","2017-12-08","\n\nBuild better software, faster, with test and release automation. Check out our recent webcast to discover why it's critical to your software development process.\n\n\u003C!-- more -->\n\nIt's been six years since Marc Andreessen's landmark \"[software is eating the world](https://www.wsj.com/articles/SB10001424053111903480904576512250915629460)\" claim, and we know now that he was on the money. Whether or not you consider yourself to be in the business of software, you are. Virtually all products and services today contain digital elements, and some component of your user experience will absolutely be online.\n\nWe've moved beyond software for manufacturing, to where 61 percent of financial services jobs are expected to be replaced by software in the 2030s. Every sort of job has the potential to be consumed by software — robo-advisors, truck drivers, grocery stockers, cashiers, and the list goes on.\n\nConsider [this statement made earlier this year by the Nvidia CEO](https://www.technologyreview.com/s/607831/nvidia-ceo-software-is-eating-the-world-but-ai-is-going-to-eat-software/): “Software is eating the world, but AI is eating software” – this puts a new software development issue in play just to stay competitive. The power of AI, when harnessed correctly, will change the landscape entirely yet again. 
Your key to effective AI may just well be adaptive Continuous Integration functionality.\n\nTo keep up, your release cycle needs to be efficient – we’re talking about when and how you distribute updates to your product. Enter release management.\n\nIn this webcast GitLab Senior Solutions Architect [Joel Krooswyk](/company/team/#JoelKroos) talks about:\n\n- Release management and how it's changing\n- Why automation is critical to test and release processes\n- Challenges of adopting test and release automation and how to overcome them\n- Unified continuous integration and continuous delivery\n\nAnd he demonstrates how to get started with test automation in no time at all with [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/).\n\n## Watch the recording\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/dvayJWwzfPY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Grab the slides\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vTIAQe2m4mheFhuanNFJzqlY4TdVY3f2wR1wg7L1jVdYF5tL3D1ewo0a5DzUotdAZp5X16ypME200Ev/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"960\" height=\"569\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n",[9,1158,976,999],{"slug":1383,"featured":6,"template":686},"automate-to-accelerate-webcast-recap","content:en-us:blog:automate-to-accelerate-webcast-recap.yml","Automate To Accelerate Webcast 
Recap","en-us/blog/automate-to-accelerate-webcast-recap.yml","en-us/blog/automate-to-accelerate-webcast-recap",{"_path":1389,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1390,"content":1396,"config":1401,"_id":1403,"_type":14,"title":1404,"_source":16,"_file":1405,"_stem":1406,"_extension":19},"/en-us/blog/avoiding-devops-tax-webcast",{"title":1391,"description":1392,"ogTitle":1391,"ogDescription":1392,"noIndex":6,"ogImage":1393,"ogUrl":1394,"ogSiteName":670,"ogType":671,"canonicalUrls":1394,"schema":1395},"How to avoid the DevOps tax","Realize a faster DevOps lifecycle with these best practices for integration and automation – watch our recent webcast with guest speaker Forrester Senior Analyst Christoper Condo and GitLab Head of Product Mark Pundsack.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670214/Blog/Hero%20Images/devops-nova-scotia-cover.jpg","https://about.gitlab.com/blog/avoiding-devops-tax-webcast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to avoid the DevOps tax\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-03-21\",\n      }",{"title":1391,"description":1392,"authors":1397,"heroImage":1393,"date":1398,"body":1399,"category":679,"tags":1400},[1378],"2018-03-21","\n\nWith the influx of DevOps-related products and services on the market, today’s application delivery toolchain has become complex and fragmented, resulting in more time spent on integrating tools instead of software innovation. 
Mark Pundsack, Head of Product at GitLab, and guest speaker Christopher Condo, Senior Analyst at Forrester, recently met to discuss the current state of [DevOps automation](/topics/devops/) and how IT leaders can unlock themselves from today’s toolchain to avoid the “DevOps tax.”\n\n\u003C!-- more -->\n\n- [What is the DevOps tax?](#what-is-the-devops-tax)\n- [What's in the webcast?](#whats-in-the-webcast)\n- [Watch the recording](#watch-the-recording)\n- [Key takeaways](#key-takeaways)\n\n## What is the DevOps tax?\n\nIn a typical DevOps toolchain, lots of different tools are tied together to deliver DevOps. You have different tools for planning, code creation, CI and security testing, packaging, release and deploy, configuration management, and monitoring.\n\nBut administrating all these products and connecting them together is complex. For example, your CI needs to talk to your version control, your code review, your security testing, your container registry, and your configuration management. 
The permutations are staggering, and it’s not just a one-time configuration – each new project needs to reconnect all these pieces together.\n\nThat's the DevOps tax: time spent on integrating and maintaining complicated toolchains, limiting your efficiency.\n\n## What's in the webcast\n\nBefore we dive into the DevOps tax and how to avoid it, we start by looking at digital transformation and current trends in DevOps, leading up to the DevOps tax, and then offering some best practices for reducing friction.\n\n## Watch the recording\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/iIElDMEC3U0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Key takeaways\n\n### The digital transformation imperative\n\n\u003Cdiv class=\"panel panel-default twitter-block\"> \u003Ca class=\"twitter-block-link panel-body\" href=\"http://twitter.com/share?text=%22When we think about digital transformation, we think about and talk about delivering value to your customer quickly, repeatedly, and with high quality%22 – guest @forrester via @GitLab webinar; url=/2018/03/21/avoiding-devops-tax-webcast/;hashtags=\" rel=\"nofollow\" target=\"_blank\" title=\"Tweet!\"> \u003Cspan class=\"twitter-text pull-left\"> \"When we think about digital transformation, we think about and talk about delivering value to your customer quickly, repeatedly, and with high quality\" – guest @forrester via @GitLab webinar\u003C/span> \u003Cspan class=\"click-to-tweet\"> Click to tweet! \u003Ci class=\"fab fa-twitter\">\u003C/i> \u003C/span> \u003C/a> \u003C/div>\n\n#### Customer experience is key\n\n>The people with the bad customer experience, their stock is lagging those companies that have an excellent customer experience. That's showing you that customer experience really matters - Christopher Condo\n\n#### Expect disruption\n\n>The common thread is placing the customer first. 
If there's a place where the customer's not being placed first, and some company can come along with an innovative way to do it, it seems like the government is open to it and customers are certainly open to it as well - Christopher Condo\n\n### Trends in DevOps\n\n#### Better integration of tools\n\n\u003Cdiv class=\"panel panel-default twitter-block\"> \u003Ca class=\"twitter-block-link panel-body\" href=\"http://twitter.com/share?text=%22You don't want people handcrafting all their tool chains all the time. You don't want a situation where every time an engineer changes teams he has to learn a whole new set of tools%22 – guest @forrester via @GitLab webinar;url=/2018/03/21/avoiding-devops-tax-webcast/;hashtags=\" rel=\"nofollow\" target=\"_blank\" title=\"Tweet!\"> \u003Cspan class=\"twitter-text pull-left\"> \"You don't want people handcrafting all their tool chains all the time. You don't want a situation where every time an engineer changes teams he has to learn a whole new set of tools\" – guest @forrester via @GitLab webinar \u003C/span> \u003Cspan class=\"click-to-tweet\"> Click to tweet! \u003Ci class=\"fab fa-twitter\">\u003C/i> \u003C/span> \u003C/a> \u003C/div>\n\n>I just ran a Wave on continuous integration tools and customers told us loud and clear that they are looking for a complete, integrated toolchain because they're tired of integrating their own toolchain. It's great to have the integrated tool chain but it comes at a cost - Christopher Condo\n\n#### Better integration of teams\n\n>They want to be able to check in with the security expert and say, \"Here's our design, here's our architecture, here's how we're handling these problems. What are we missing? 
What do we need to be doing next?\" All of those teams sort of act as shared resources, they don't act as blockers on a particular project - Christopher Condo\n\n#### Containers are critical\n\n\u003Cdiv class=\"panel panel-default twitter-block\"> \u003Ca class=\"twitter-block-link panel-body\" href=\"http://twitter.com/share?text=%22Containers allow folks to worry about what they're best at rather than trying to have everybody know everything%22 – guest @forrester via @GitLab webinar; url=/2018/03/21/avoiding-devops-tax-webcast/; hashtags=\" rel=\"nofollow\" target=\"_blank\" title=\"Tweet!\"> \u003Cspan class=\"twitter-text pull-left\"> \"Containers allow folks to worry about what they're best at rather than trying to have everybody know everything\" – guest @forrester via @GitLab webinar \u003C/span> \u003Cspan class=\"click-to-tweet\"> Click to tweet! \u003Ci class=\"fab fa-twitter\">\u003C/i> \u003C/span> \u003C/a> \u003C/div>\n\n### What is the DevOps tax?\n\n>When it's a pain to integrate security, how many teams just don't bother? Or when it's a pain to share information between teams, how many organizations overcome that burden and find a way to work together? How much impact does this tax have on collaboration? With separate tools and separate processes, we're naturally encouraging separate silos where functional teams work in isolation - Mark Pundsack\n\n### Concurrent DevOps\n\n\u003Cdiv class=\"panel panel-default twitter-block\"> \u003Ca class=\"twitter-block-link panel-body\" href=\"http://twitter.com/share?text=%22When the entire DevOps lifecycle is seamless, magic starts to happen. Teams can work concurrently, not sequentially%22 – @MarkPundsack via @GitLab;url=/2018/03/21/avoiding-devops-tax-webcast/;hashtags=\" rel=\"nofollow\" target=\"_blank\" title=\"Tweet!\"> \u003Cspan class=\"twitter-text pull-left\"> \"When the entire DevOps lifecycle is seamless, magic starts to happen. 
Teams can work concurrently, not sequentially\" – @MarkPundsack via @GitLab \u003C/span> \u003Cspan class=\"click-to-tweet\"> Click to tweet! \u003Ci class=\"fab fa-twitter\">\u003C/i> \u003C/span> \u003C/a> \u003C/div>\n\n### DevOps best practices\n\n- To maximize your digital transformation, you need to optimize your CI/CD pipeline, create integrated product teams, and modernize your application architecture with microservices and a cloud native approach.\n- Avoid the DevOps tax by reducing the number of integration points in your toolchain, integrate as deeply as you can, and strive for a single conversation across development, operations, security and business.\n- If you’re just getting started, start with continuous integration. Automating tests and building confidence in your code will pay dividends many times over.\n- If you already got CI, then move on to continuous delivery. Automate deployments and make them less scary. If you already started the DevOps transformation, then embrace the culture. You can only go so far when there’s a wall between dev and ops.\n\u003Cbr>\u003Cbr>\n",[9,999],{"slug":1402,"featured":6,"template":686},"avoiding-devops-tax-webcast","content:en-us:blog:avoiding-devops-tax-webcast.yml","Avoiding Devops Tax Webcast","en-us/blog/avoiding-devops-tax-webcast.yml","en-us/blog/avoiding-devops-tax-webcast",{"_path":1408,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1409,"content":1414,"config":1419,"_id":1421,"_type":14,"title":1422,"_source":16,"_file":1423,"_stem":1424,"_extension":19},"/en-us/blog/avoiding-foreclosure-on-your-technical-debt",{"title":1410,"description":1411,"ogTitle":1410,"ogDescription":1411,"noIndex":6,"ogImage":1193,"ogUrl":1412,"ogSiteName":670,"ogType":671,"canonicalUrls":1412,"schema":1413},"How to avoid foreclosure on your technical debt","There’s no need to be embarrassed — we all have technical debt. 
Here’s how you pay it off.","https://about.gitlab.com/blog/avoiding-foreclosure-on-your-technical-debt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to avoid foreclosure on your technical debt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jeremiah\"}],\n        \"datePublished\": \"2019-04-29\",\n      }",{"title":1410,"description":1411,"authors":1415,"heroImage":1193,"date":1416,"body":1417,"category":679,"tags":1418},[1198],"2019-04-29","\n\nHow much debt can you afford? We all live with some form of debt, whether it’s a student loan, a car loan, a mortgage, or a credit card balance. Debt doesn’t have to be a bad thing. It can be a tool that gives us leverage and flexibility to make large purchases. But there are limits to how much debt is reasonable and that’s where people get into trouble. If they take on too much debt, bad things can happen. \n\nWhat does this have to do with GitLab? Everything.\n\n## What is technical debt?\n\nAccording to [Martin Fowler’s excellent summary on technical debt](https://martinfowler.com/bliki/TechnicalDebt.html), it seems that [Ward Cunningham coined the term](https://www.youtube.com/watch?v=pqeJFYwnkjE) around 1993 as a metaphor to describe a typical pattern that occurs on software projects. Technical debt is a pattern in which a development team does not have enough time, information, or capacity to refine and refactor their code, so their architecture, implementation, and testing may be incomplete. The challenge with technical debt is similar to financial debt in that it doesn’t magically go away. Unless it is managed and paid down, technical debt will grow over time, just like the balance on your credit card bill.\n\n## How to reduce technical debt\n\nYou may feel overwhelmed but there is a reason to be optimistic. 
The power of rapid, continuous delivery combined with small, incremental changes can help you manage your technical debt and avoid “foreclosure.” Here are three things you can do today to get your “technical finances” in order:\n\n1. **Find your technical debt and document it.**  It’s hard to pay off all your bills if you don’t know what they are. Begin this process by creating a list of issues that capture your specific technical “bills.” Assign them `technical debt` and `priority` labels. You probably won’t be able to pay them all off at one time, but now you know where to start.\n\n1. **Embrace small changes.**  At GitLab, we embrace [Minimum Viable Change (MVC)](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc). The goal of MVC is to make small, incremental improvements. Your goal is to pay down your debt one micro payment at a time.  \n\n1. **Let continuous delivery automate your payments.**  You can’t automate the improvements, but you can leverage CI/CD automation to streamline the process of testing, validating, and deploying code changes for you. Continuous delivery removes the friction and bottlenecks between your developers and the “bank.”  \n\nTechnical debt is a reality in almost every software product in the world. The point about technical debt isn’t how to avoid it, but how to _manage_ it so that you’re not in a situation where you are forced to foreclose on your project because the technical debt is out of control. The tools are readily available. The question is: Are you ready to start managing your technical debt? 
[Just commit](/blog/strategies-to-reduce-cycle-times/) to your future.\n",[9,683],{"slug":1420,"featured":6,"template":686},"avoiding-foreclosure-on-your-technical-debt","content:en-us:blog:avoiding-foreclosure-on-your-technical-debt.yml","Avoiding Foreclosure On Your Technical Debt","en-us/blog/avoiding-foreclosure-on-your-technical-debt.yml","en-us/blog/avoiding-foreclosure-on-your-technical-debt",{"_path":1426,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1427,"content":1433,"config":1438,"_id":1440,"_type":14,"title":1441,"_source":16,"_file":1442,"_stem":1443,"_extension":19},"/en-us/blog/aws-devsecops-competency-partner",{"title":1428,"description":1429,"ogTitle":1428,"ogDescription":1429,"noIndex":6,"ogImage":1430,"ogUrl":1431,"ogSiteName":670,"ogType":671,"canonicalUrls":1431,"schema":1432},"GitLab achieves the AWS DevSecOps Partner Competency Specialty","The AWS DevSecOps Partner Competency Specialty demonstrates that GitLab is instrumental in helping customers implement better security while continuing to innovate.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668799/Blog/Hero%20Images/securitylifecycle.png","https://about.gitlab.com/blog/aws-devsecops-competency-partner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab achieves the AWS DevSecOps Partner Competency Specialty\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-09-25\",\n      }",{"title":1428,"description":1429,"authors":1434,"heroImage":1430,"date":1435,"body":1436,"category":769,"tags":1437},[1239],"2023-09-25","\nGitLab recently achieved AWS's DevSecOps Partner Competency designation, a sub-specialty for the [AWS DevOps ISV Partner Competency](https://partners.amazonaws.com/partners/001E0000018YWFfIAO/GitLab,%20Inc) category. GitLab also holds the AWS DevOps ISV Partner Competency designation. 
AWS's partner qualification program signifies to customers that AWS has vetted GitLab's capabilities and use cases.\n\n> Attending [AWS re:Invent 2023](https://reinvent.awsevents.com/)? Find us at Booth 1152.\n\nAccording to AWS, solutions in the [DevSecOps category](https://aws.amazon.com/devops/partner-solutions/?blog-posts-cards.sort-by=item.additionalFields.createdDate&blog-posts-cards.sort-order=desc&partner-case-studies-cards.sort-by=item.additionalFields.sortDate&partner-case-studies-cards.sort-order=desc) \"make it easy for customers to integrate security across every stage of the development and delivery cycles, providing rapid and contextual feedback to development, security, and ops teams.\" The designation comprises a [validation checklist](https://apn-checklists.s3.amazonaws.com/competency/devops/technology/CenAm4qx8.html#competencyCategories) and attestation that GitLab's DevSecOps Platform meets AWS’s expectations.\n\n## GitLab's strength in DevSecOps\nGitLab's [AI-powered DevSecOps platform](https://about.gitlab.com/gitlab-duo/) helps organizations shift left on vulnerability remediation. 
At GitLab, shifting left means ensuring developers have a frictionless security defect remediation experience that enables them to immediately handle vulnerabilities in their code.\n\nGitLab's DevSecOps Platform:\n- surfaces security findings shortly after they are introduced and while the code is still being worked on\n- associates findings directly with those who changed the code\n- offers remediation guidance (including on-demand training and automated fixes)\n- supports rich, in-context collaboration for vulnerability management\n\n![GitLab + AWS Workflow](https://about.gitlab.com/images/blogimages/aws/devsecops-post/gitlabawsworkflow.png)\n\n\n![AWS Partner Logo](https://about.gitlab.com/images/blogimages/aws/devopsisvpartner.png){: .right}\n",[1243,282,9],{"slug":1439,"featured":6,"template":686},"aws-devsecops-competency-partner","content:en-us:blog:aws-devsecops-competency-partner.yml","Aws Devsecops Competency Partner","en-us/blog/aws-devsecops-competency-partner.yml","en-us/blog/aws-devsecops-competency-partner",{"_path":1445,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1446,"content":1452,"config":1458,"_id":1460,"_type":14,"title":1461,"_source":16,"_file":1462,"_stem":1463,"_extension":19},"/en-us/blog/battling-toolchain-technical-debt",{"title":1447,"description":1448,"ogTitle":1447,"ogDescription":1448,"noIndex":6,"ogImage":1449,"ogUrl":1450,"ogSiteName":670,"ogType":671,"canonicalUrls":1450,"schema":1451},"Battling toolchain technical debt","DevOps teams can hinder the software development lifecycles and application performance if they let their toolchains become unruly. 
Read how GitLab can help reduce that technical debt.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667845/Blog/Hero%20Images/gl15.jpg","https://about.gitlab.com/blog/battling-toolchain-technical-debt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Battling toolchain technical debt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-06-21\",\n      }",{"title":1447,"description":1448,"authors":1453,"heroImage":1449,"date":1455,"body":1456,"category":769,"tags":1457},[1454],"Sandra Gittlen","2022-06-21","\nDevelopers love their tools. Operations teams love their tools. And security teams love their tools. As Dev, Sec, and Ops consolidate onto a single DevOps platform, toolchain technical debt becomes exponentially more costly and complex.\n\n“Tools should be in the background enabling excellent development, operations, and security practices. However, DevOps teams are often led by their tools rather than the other way around and that can hinder all aspects of the software development lifecycle (SDLC),” says [Cindy Blake](https://gitlab.com/cblake), CISSP, director of product and solutions marketing at GitLab.\n\nAn April 2022 Gartner® report titled “Beware the DevOps Toolchain Debt Collector” notes that “many organizations find themselves with outdated, poorly governed, and unmanageable toolchains as they scale DevOps initiatives.”\n\nOne of the key findings, according to Gartner, is that “most organizations create homegrown toolchains, often leveraging the tools beyond their functional design. 
This not only leads to a fragmented toolchain, but also creates complications when tooling needs to be scaled, replaced, or updated.”\n\nToolchain technical debt introduces complexity as companies shift critical tasks such as reliability, governance, and compliance left in the SDLC.\n\n> Discover how GitLab 15 can help your team deliver secure software, while maintaining compliance and automating manual processes.\nSave the date for our GitLab 15 [launch event](https://page.gitlab.com/fifteen) on June 23rd!\n\n## No time for technical debt\n\nFew DevOps teams give toolchain upkeep the time and attention it requires. According to [GitLab’s 2021 DevSecOps\nsurvey](/images/developer-survey/gitlab-devsecops-2021-survey-results.pdf), nearly two-thirds of survey respondents, 61%, said they spend 20% or less of their time on toolchain integration and maintenance each month.\n\n“Developers face challenges and time constraints while maintaining these complex, stand-alone tool siloes, building fragility and technical debt that the [infrastructure and operations] leader has to deal with,” Gartner states. The research firm adds, “These outdated toolchains further increase overhead costs, magnify technical risks, add operational toil, and limit business agility.”\n\nBlake agrees: “Complex toolchains inhibit the ability to govern the software development and deployment process. Policies must be managed across tools and visibility into code changes and changes to its surrounding infrastructure become difficult to see and track. Time is wasted on managing the toolchain instead of value-added work.”\n\n## Getting purpose-driven\nThe remedy to toolchain sprawl and subsequent debt is to change strategy. 
Instead of putting energy into figuring out how to maintain one-off tools, DevOps teams should focus on how to enable processes and policies that support simplicity, control, and visibility across the SDLC.\n\n“These are the characteristics needed to meet reliability, governance, and compliance demands. A united platform like GitLab helps you do that,” Blake says.\n\nGartner states: “Successful infrastructure and operations leaders reduce technical debt and sustainably scale DevOps toolchain initiatives across the organization by using a prioritized, iterative strategy that minimizes friction in making changes to toolchains and more quickly delivers customer value.”\n\nAdopting a purpose-built platform instead of a complex and ad-hoc toolchain also eases an organization’s ability to automate the SDLC. “Automation abstracts complexity away from the developer and provides guard rails so DevOps teams gain greater efficiency, accuracy, and consistency,” Blake says. In addition, automation reduces the audit footprint in terms of what needs oversight and inspection.\n\nPlatforms also support automation throughout operations, including building and\ntesting infrastructure as code, so that “you can eliminate the variables when you’re trying to debug an application,” she says. This speeds troubleshooting response times and reduces application downtime.\n\nFor instance, GitLab, the One DevOps Platform, features [dependency\nlists](https://docs.gitlab.com/ee/user/application_security/dependency_list/), also known as software bill of materials (SBOM), that show which dependencies were used and help to identify where problems exist. “GitLab also helps you avoid problems altogether by consistently scanning dependencies according to policies and compliance standards that the platform provides,” Blake says. DevOps teams can easily see what changes were made when and by whom. 
“That visibility is critical when trying to resolve issues and prevent them from happening again,” she says.\n\n## Reclaim your DevOps team’s time\nBy adopting a single DevOps platform, organizations can reclaim developer, security, and operations time that has been spent stitching tools together or optimizing for one developer’s tool, and then backtracking through toolchains when an application breaks because those tools can’t co-exist.\n\n“DevOps teams have a lot on their plates and trying to manage unruly toolchains is simply a waste of time. You should be creating state-of-the-art software, not manually integrating and maintaining legacy tools,” Blake says.\n\nShe emphasizes that GitLab is not “rip and replace”; it’s a platform where everything needed for DevOps comes together in one place. IT leadership benefits from this united approach as well. [Value stream\nanalytics](/solutions/value-stream-management/) provide insight into your end-to-end software throughput, helping optimize IT resources most efficiently and enabling a flexible, responsive business outcome. “We meet DevOps teams where they are and put the user – whether they be a developer, operations, or security professional – in the center of the platform,” she says.\n\n[Try GitLab Ultimate for free](/free-trial/\n) for 30 days.\n\n_GARTNER is a registered trademark and service mark of Gartner, Inc. and/or its affiliates in the U.S. and internationally and is used herein with permission. 
All rights reserved._\n",[9,1040,683],{"slug":1459,"featured":6,"template":686},"battling-toolchain-technical-debt","content:en-us:blog:battling-toolchain-technical-debt.yml","Battling Toolchain Technical Debt","en-us/blog/battling-toolchain-technical-debt.yml","en-us/blog/battling-toolchain-technical-debt",{"_path":1465,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1466,"content":1472,"config":1478,"_id":1480,"_type":14,"title":1481,"_source":16,"_file":1482,"_stem":1483,"_extension":19},"/en-us/blog/beginner-guide-ci-cd",{"title":1467,"description":1468,"ogTitle":1467,"ogDescription":1468,"noIndex":6,"ogImage":1469,"ogUrl":1470,"ogSiteName":670,"ogType":671,"canonicalUrls":1470,"schema":1471},"GitLab’s guide to CI/CD for beginners","CI/CD is a key part of the DevOps journey. Here’s everything you need to understand about this game-changing process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681391/Blog/Hero%20Images/beginnercicd.jpg","https://about.gitlab.com/blog/beginner-guide-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s guide to CI/CD for beginners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-07-06\",\n      }",{"title":1467,"description":1468,"authors":1473,"heroImage":1469,"date":1474,"body":1475,"category":791,"tags":1476},[851],"2020-07-06","\n\nContinuous integration and [continuous delivery/deployment](/topics/continuous-delivery/) (most often referred to as CI/CD) are the cornerstones of [DevOps](/topics/devops/) and any modern software development practice. Here’s everything you need to know about [CI/CD for beginners](/blog/how-to-keep-up-with-ci-cd-best-practices/).\n\n## What CI/CD means\n\nIf your software development process involves a lot of stopping, starting and handoffs, [CI/CD](/topics/ci-cd/) may be just what you’re looking for. 
A CI/CD pipeline is a seamless way for developers to make changes to code that are then automatically tested and pushed out for delivery and deployment. The goal is to eliminate downtime. Get CI/CD right and you’re well on the road to successful DevOps and dramatically faster code release. In our [2020 Global DevSecOps Survey](/blog/devsecops-survey-released/), nearly 83% of survey takers said they’re getting code out the door more quickly thanks to DevOps.\n\n## Understand CI/CD basics\n\nIf you’re not sure what a pipeline is, or how the entire process works, here’s a [detailed explanation](/blog/a-beginners-guide-to-continuous-integration/) of how all the moving parts work together to make software development quicker and easier.\n\n## Four benefits of CI/CD\n\nYes, CI/CD helps speed up delivery of code but it also makes for happier software developers. At a time when there continues to be [a worldwide shortage of software developers](https://www.gartner.com/en/newsroom/press-releases/2019-01-17-gartner-survey-shows-global-talent-shortage-is-now-the-top-emerging-risk-facing-organizations), it’s critical to retain technical talent. Developer job satisfaction is just one of [four key benefits](/blog/positive-outcomes-ci-cd/) that come from implementing a CI/CD process.\n\n## How to pick the right CI/CD tool\n\nNow that you’re sold on the [benefits of CI/CD](/topics/ci-cd/benefits-continuous-integration/) it’s time to choose a tool. There are a number of considerations, from [budget to room for growth](/topics/ci-cd/choose-continuous-integration-tool/) so it’s worth taking the time to think it through.\n\n## How to make the business case for CI/CD\n\nTo tie a CI/CD process to ROI isn’t difficult, but it’s an important step to take to get management buy-in. 
Here are [three factors to consider](/blog/modernize-your-ci-cd/) – including the hidden cost of toolchain sprawl – as you make the case for CI/CD.\n\n## Take 20 minutes and build a CI/CD pipeline\n\nOk, enough talking about theoreticals... it’s time to do something. Using GitLab’s [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) functionality, you can [move from code to production](/blog/building-a-cicd-pipeline-in-20-mins/) in just two simple steps and in only 20 minutes (no, really, just 20 minutes).\n\n## Next stop: Kubernetes!\n\nFinally, you can tie your GitLab CI pipeline into Google Kubernetes Engine (GKE) and as a bonus it takes only 15 minutes. Our [step-by-step tutorial](/blog/gitlab-ci-on-google-kubernetes-engine/) is completely beginner-friendly.\n\n**Level up your CI/CD knowledge:**\n\n[How CI can put the \"Sec\" in DevSecOps](/blog/solve-devsecops-challenges-with-gitlab-ci-cd/)\n\n[Autoscale GitLab CI with AWS Fargate](/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate/)\n\n[Get started with parent-child pipelines](/blog/parent-child-pipelines/)\n\nCover image by [Kyle Glenn](https://unsplash.com/@kylejglenn) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[109,9,1477],"kubernetes",{"slug":1479,"featured":6,"template":686},"beginner-guide-ci-cd","content:en-us:blog:beginner-guide-ci-cd.yml","Beginner Guide Ci Cd","en-us/blog/beginner-guide-ci-cd.yml","en-us/blog/beginner-guide-ci-cd",{"_path":1485,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1486,"content":1492,"config":1497,"_id":1499,"_type":14,"title":1500,"_source":16,"_file":1501,"_stem":1502,"_extension":19},"/en-us/blog/beginner-guide-python-programming",{"title":1487,"description":1488,"ogTitle":1487,"ogDescription":1488,"noIndex":6,"ogImage":1489,"ogUrl":1490,"ogSiteName":670,"ogType":671,"canonicalUrls":1490,"schema":1491},"How to get started with Python programming","Python is increasingly popular, and for good reason. 
Here's our beginner's guide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664962/Blog/Hero%20Images/python.jpg","https://about.gitlab.com/blog/beginner-guide-python-programming","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get started with Python programming\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-10-21\",\n      }",{"title":1487,"description":1488,"authors":1493,"heroImage":1489,"date":1494,"body":1495,"category":769,"tags":1496},[745],"2021-10-21","Are you a programming enthusiast who wants to learn Python? Are you new to coding? Do you need help deciding where to begin with Python? If you are looking for answers to these questions, then you are in the right place.\n\n## How to start writing code with Python\n\nPython is an easy-to-learn, easy-to-use and easy-to-deploy programming language, with rampant usage in building web and desktop applications, analyzing data and performing [DevOps](https://about.gitlab.com/topics/devops/) tasks. It is a free, open-source, object-oriented coding language used to write simple scripts and complex programs. Of the almost 700 programming languages, Python is considered one of the best to learn first.\n\n## Installing Python\n\nBefore discussing the basics of Python, it is essential to download and install Python on your desktop/laptop. Python works on multiple platforms, including Linux, Windows and Mac. It comes preinstalled on most Mac and Linux systems; however, you should download the latest version from the official Python website.\n\nTo check the current Python version on your system, open the command line and type “python -V”. 
\n\n![command prompt](https://about.gitlab.com/images/blogimages/python1.png){: .shadow}\n\nIf you have an outdated version, download either the 32- or 64-bit setup from the website based on your system requirements.\n\nThere are other alternatives for downloading the setup: for Windows, you can install it directly from Microsoft. For Linux, install it using the package manager. For macOS, you can download it from Homebrew.\n\nOnce the setup is downloaded, run the file installer, and click on “Install Now”. Once the installation is complete, you are ready to go. Below is an example of a Python installation for Windows.\n\n![install Python](https://about.gitlab.com/images/blogimages/python2.png){: .shadow}\n\n## Running Python in command prompt\n\nTo verify Python is installed and working correctly in Windows, open the command prompt and enter “python”, which will invoke the interpreter. You can directly execute Python codes in it.  For example, type “2*5+1” and press “enter”. You will see “11” as the output. Entering “quit()” will exit the interpreter.\n\n![Python interpreter](https://about.gitlab.com/images/blogimages/python3.png){: .shadow}\n\n## Running Python in IDE\n\nWith the latest Python installed, you are now ready to start programming in Python. When writing long scripts or programs in Python, use Python’s built-in Integrated Development and Learning Environment (IDLE).\n\nStart the IDLE and then, from the File dropdown, select “New File”, which opens a new editing window. So now, on your screen, you have two windows: a Python shell and an untitled file.\n\n![Python shell and untitled file](https://about.gitlab.com/images/blogimages/python4.png){: .shadow}\n\nThe Python shell is a REPL environment, which is shorthand for \"read-eval-print loop\". It runs snippets of the code, usually one statement at a time. 
For example, by repeating the same calculation “2*5+1” that we did in the command prompt, you can see how a Python shell can function as a calculator.\n\n![Python as a calculator](https://about.gitlab.com/images/blogimages/python5.png){: .shadow}\n\nThe untitled window is a text editing window for writing complete programs. The shell displays its output. For example, the conventional first program of Python for beginners is printing “Hello World!”. Make sure you save the text editor before running it by pressing “F5”.\n\n![Hello World](https://about.gitlab.com/images/blogimages/python61.png){: .shadow}\n\n## The basics of Python\n\nWe know you can’t wait to start writing long scripts for games and websites, but you still have a long way to get there. Just like with learning any other language, you must first understand the basics of Python. \n\nThe **print()** function, as seen in the Hello World! example, prints a value on the output window. A value is the most basic thing a program uses. It can be a string, a numeric value or any other Python object. Any object within single/double quotations is called a string. For instance, the “Hello World!” that is printed in the above program is also of the type string. Numeric values like 4 and 4.5 are the types of integers and floats, respectively. You can change an integer or float into a string and vice versa using the built-in functions **int()**, **float()** and **str()**.\n\n![value in an output window](https://about.gitlab.com/images/blogimages/python7.png){: .shadow}\n\n## Python’s vocabulary\n\nPython is the simplest coding language. It is easy to read and understand. Unlike human languages, Python has a small vocabulary or reserved words holding special meaning. Terms other than this reserved vocabulary hold meaning only to you and are called variables. 
These 35 reserved words are:\n\n![Python terms](https://about.gitlab.com/images/blogimages/python8.png){: .shadow}\n\nMake sure you use these words for their specified purpose to avoid confusing the Python interpreter and causing a syntax error.\n\n### Naming variables\n\nSometimes you want to store values in your code for retrieving them later, which you can do by giving them symbolic names called variables. As seen below, we ask Python to store 5 and 6 with labels x and y, respectively, and then retrieve them later to find their sum.\n\n![storing variables](https://about.gitlab.com/images/blogimages/python9.png){: .shadow}\n\nThere are rules for choosing a name for a variable; failing to follow these gives a syntax error. A few mandatory rules are narrated below:\n\n1. The name can contain both letters and numbers, but it can’t start with a number.\n1. An underscore can appear in the name to separate multiple words.\n1. Special symbols like @#$ are illegal and should not appear in the name.\n1. Python keywords should not be used as names for variables.\n\n### Understanding operators and operands\n\nPython uses special symbols called “operators” for representing basic mathematical computation. The values to which these operators are applied are called operands. The symbols used as operators for subtraction, addition, division, multiplication and exponentiation are  -,+, /, * and **, respectively. \n\n![symbols for operators](https://about.gitlab.com/images/blogimages/python10.png){: .shadow}\n\nThe modulus operator (%) outputs the remainder of the first operand divided by the second operand. It is useful in checking whether a number is divisible by another and extracting the rightmost digit/digits of a number.\n\n![modulus operator](https://about.gitlab.com/images/blogimages/python11.png){: .shadow}\n\n### Using expressions\n\nA combination of values, variables and operators is called an expression. 
An expression typed in the shell gets evaluated, and the answer is displayed. However, in a script, an expression doesn't do anything on its own.\n\nPython uses the mathematical convention PEMDAS for the operators, which means that P for Parentheses has the highest precedence, then Exponentiation, Multiplication and Division, which have the same priority. Addition and Subtraction come next and also have the same precedence. Operators that have the same preference are also evaluated from left to right.\n\n![PEMDAS](https://about.gitlab.com/images/blogimages/python12.png){: .shadow}\n\nThe Addition and Multiplication operators also work with strings for concatenation and repeating a string, respectively.\n\n![addition and multiplication operators](https://about.gitlab.com/images/blogimages/python13.png){: .shadow}\n\nPython also allows you to take the value for a variable from the user via their keyboard. This can be done using a built-in function called **input**.\n\n![input](https://about.gitlab.com/images/blogimages/python14.png){: .shadow}\n\n## Write your first program\n\nNow it's time to write a short program using everything you've learned here. Write a script that takes two numbers as input and adds them. Do this on your own and see the code below to tally your work.\n\n![write a short program](https://about.gitlab.com/images/blogimages/python15.png){: .shadow}\n\n**Congratulations!** You just wrote your first program.\n\nLearning Python is easy and fun. We just helped you make it through the basics. To become a professional Python Programmer, you still have a lot to learn and practice. 
Good luck on your journey to becoming an expert coder.\n\nPhoto by \u003Ca href=\"https://unsplash.com/@davidclode?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">David Clode\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/python?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>",[9,813,978],{"slug":1498,"featured":6,"template":686},"beginner-guide-python-programming","content:en-us:blog:beginner-guide-python-programming.yml","Beginner Guide Python Programming","en-us/blog/beginner-guide-python-programming.yml","en-us/blog/beginner-guide-python-programming",{"_path":1504,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1505,"content":1511,"config":1516,"_id":1518,"_type":14,"title":1519,"_source":16,"_file":1520,"_stem":1521,"_extension":19},"/en-us/blog/best-advice-for-your-devops-career-keep-on-learning",{"title":1506,"description":1507,"ogTitle":1506,"ogDescription":1507,"noIndex":6,"ogImage":1508,"ogUrl":1509,"ogSiteName":670,"ogType":671,"canonicalUrls":1509,"schema":1510},"Best advice for your DevOps career? Keep on learning","If you want a new job, or a higher salary, or preferably both, add some skills to your DevOps resume. Here's a look at our strategy for DIY-ing your continuing ed.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679473/Blog/Hero%20Images/designing-in-an-all-remote-company.jpg","https://about.gitlab.com/blog/best-advice-for-your-devops-career-keep-on-learning","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Best advice for your DevOps career? 
Keep on learning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2021-11-09\"\n      }",{"title":1506,"description":1507,"authors":1512,"heroImage":1508,"date":852,"body":1513,"category":679,"tags":1514},[810],"\nDevOps skills might be in demand, but it’s not the time to remain complacent if you want a new (and better) job or a higher salary. Luckily the best career move you can make is also the easiest: continue to add new skills, even if you have to DIY it.\n\n“I think continually educating myself has been really important in my career, and it’s been mostly DIY,” said [Brendan O’Leary](https://gitlab.com/brendan), a staff developer evangelist, and product and engineering leader at GitLab. “It’s allowed me to make different career moves and advance my career by changing companies or by changing roles at my current company… Continuing to educate yourself is one of the most important things you can do.”\n\n## DevOps education: how to keep learning\n\nIt’s well known that continuing to educate yourself and pursuing certifications are two of the [top ways to increase your paycheck](/blog/four-tips-to-increase-your-devops-salary/), but here’s our best advice on how to bootstrap your learning journey without waiting for your employer.\n\n### Take responsibility for your own journey\n\nDon’t panic if your company is one of the many that doesn’t offer continuous education opportunities: According to the DevOps Institute’s 2021 Enterprise DevOps Skills Report, [52 percent of companies don’t](https://learn.gitlab.com/devops-institute/2021-doi-devops-upskilling-report?utm_medium=email&utm_source=marketo&utm_campaign=devopsgtm&utm_content=doi-devops-upskilling-report). (To be transparent, GitLab was one of the partners in the Institute’s survey.)\n\n### Figure out what you, and your company, need\n\nMake sure you’re not learning about a new technology or tool because it’s the cool new thing. 
Focus your time and energy on learning something that actually will solve a problem or give your business a competitive edge. Keep your skills aligned with shifting business demands, learning enough about a new technology so you understand if it will solve a business problem.\n\nIn a sea of possibilities, there are some concrete learning options we can suggest. In our [2021 Global DevSecOps Survey](/developer-survey/), we asked respondents what skill or skills would be most important for their future career. A majority of developers said knowledge around artificial intelligence and machine learning would be critical, while ops team members wanted more advanced programming languages. Security pros, on the other hand, wanted to become subject matter experts in their industries.\n\n### Assessing your skills and deficits\n\nGauge your baseline of skills, experience and certifications. What comes naturally to you, and what is more of a struggle? Now compare your baseline to what your company needs, and then broaden it out to what the industry is looking for.\n\nOne easy way to broadly compare your skills to others is to look at a job search site like [Glassdoor.com](https://www.glassdoor.com). The job listings detail the skills, languages, experiences, technologies and other attributes an employer is looking for.\n\nWe randomly grabbed and anonymized a job posting for a DevOps engineer from Glassdoor, below. You’ll see how many boxes you’ll need to check (we bolded the key phrases just to make the point):\n\n_You will demonstrate a **leadership** mindset, solid **operational experience**, and the **ability to problem-solve**. Additionally, you should have exceptional **communication skills**, be knowledgeable about the latest industry trends, and be **highly innovative**. 
The DevOps Engineer will help enhance and maintain a **programmable infrastructure, configure, implement, debug and document new and existing applications running on Linux and Windows operating systems in private and public cloud infrastructures**. Engage in **design, development, installation, and system administration of build/continuous integration systems, anti-virus systems, and configuration management systems**. Participate in the full development life cycle of DevOps projects including **assessment of requirements, system analysis, and design.**_\n\n### Go to the source for certifications\n\nOf course, there are university classes but they can be pricey. You don’t always have to spend thousands of dollars on a college course. Go to the original source of what you want to learn, and let certifications be your friend. [A survey from the McKinsey Quarterly,](https://www.mckinsey.com/business-functions/mckinsey-accelerate/our-insights/five-fifty-the-skillful-corporation?cid=fivefifty-eml-alt-mkq-mck&hlkid=a7a8ae1b68574d02b81db1f1eeb8fd8d&hctky=12428831&hdpid=8233aa33-5ff4-4450-a4c7-2f47dfeaf9d0) noted that 66 percent of survey respondents called certifications “extremely valuable.”\n\nFor instance, if you’re using The GitLab Platform, you can get a [security certification](/services/education/gitlab-security-specialist/) from GitLab. There are also [certifications for](/learn/certifications/public/) everything from CI/CD training, to project management and Git basics. 
Similarly, if you need to bone up on Google Cloud, check out their site for [certifications](https://acloudguru.com/training-library/gcp-cloud-training?utm_campaign=11244863417&utm_source=google&utm_medium=cpc&utm_content=469352928666&utm_term=b_&adgroupid=115625160932&gclid=Cj0KCQjw5oiMBhDtARIsAJi0qk20jsoQ55oCnlbde3tozrDRExDxxiJ0AooFulqXXguwOX072-OwJNAaAjd3EALw_wcB).\n\n### Other opportunities to educate yourself\n\nYou also can find learning opportunities at a lot of conferences, coding events, bootcamps, hackathons and workshops. Especially in the time of COVID-19, think about taking advantage of online courses. [YouTube](https://www.youtube.com) is full of hands-on technical tutorials, including a lot from GitLab and other tech companies as well as consultants and individual contributors. Don’t forget GitLab Learn, where you can do a self-paced deep dive via video tutorials into a number of key DevOps areas, including [continuous integration (CI)](/solutions/continuous-integration/).\n\nAnd for female developers, organizations like [Women Who Code](https://www.womenwhocode.com/) offer scholarships, tutorials and educational materials.\n\nDon’t forget about mentorships. Find someone who has the knowledge and experience you need and ask them to work with you and bring you up to speed. Then don’t forget to later turn around and lend a hand to the person coming up after you.\n\nStay tuned for more information on what hard and soft skills you should consider adding to your resume.\n\n## Read more on DevOps careers:\n\n- [6 tips to make software developer hiring easier](/blog/6-tips-to-make-software-developer-hiring-easier/)\n\n- [Four tips to increase your DevOps salary](/blog/four-tips-to-increase-your-devops-salary/)\n\n- [DevOps salaries in 2021: Where do you rank?](/blog/a-look-at-devops-salaries/)\n\n- [Have DevOps jobs to fill? 
Try these 3 strategies to hire and retain](/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain/)\n",[813,9,1515],"contributors",{"slug":1517,"featured":6,"template":686},"best-advice-for-your-devops-career-keep-on-learning","content:en-us:blog:best-advice-for-your-devops-career-keep-on-learning.yml","Best Advice For Your Devops Career Keep On Learning","en-us/blog/best-advice-for-your-devops-career-keep-on-learning.yml","en-us/blog/best-advice-for-your-devops-career-keep-on-learning",{"_path":1523,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1524,"content":1530,"config":1535,"_id":1537,"_type":14,"title":1538,"_source":16,"_file":1539,"_stem":1540,"_extension":19},"/en-us/blog/better-devops-with-gitlab-ci-cd",{"title":1525,"description":1526,"ogTitle":1525,"ogDescription":1526,"noIndex":6,"ogImage":1527,"ogUrl":1528,"ogSiteName":670,"ogType":671,"canonicalUrls":1528,"schema":1529},"Unlock better DevOps with GitLab CI/CD","Why a single application helps to eliminate silos and knowledge gaps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670652/Blog/Hero%20Images/dev-to-devops-cover.png","https://about.gitlab.com/blog/better-devops-with-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Unlock better DevOps with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-10-18\"\n      }",{"title":1525,"description":1526,"authors":1531,"heroImage":1527,"date":1532,"body":1533,"category":679,"tags":1534},[788],"2019-10-18","\nWe’ve talked about how the [seamless collaboration between Development and IT operations is a beautiful thing](/topics/devops/build-a-devops-team/). When an organization has a healthy DevOps culture, they’re able to meet business objectives and increase delivery speed. 
DevOps is meant to eliminate silos so everyone can get on the same page, and the tools you use can play a big role in just how successful, or unsuccessful, your DevOps strategy is.\n\n## Complicated tools create silos\n\nOne of the ways that operations can be at a disadvantage is by having to maintain a [complicated plug-in environment](/blog/plugin-instability/). This scenario becomes especially problematic when things go wrong and developers are relying on a specific group to fix the problem. While specialization isn’t necessarily a bad thing (devs shouldn’t have to do ops, and vice versa), usually the expertise needed to manage a plugin environment is a specialization within an already specialized group.\n\nJenkins is the most popular example of this kind of complexity, for a few reasons:\n\n*   **Jenkins architecture requires maintaining a large set of build environment systems**: At scale, this requires many dedicated people to manage machines, install and manage build tools (NodeJS, Python, Java, et al.), monitor machines, etc.\n\n*   **Upgrading is a risk (Jenkins or plug-ins)**: There is a good chance that upgrades can cause processes to fail, leading to broken builds or downtime.\n\n*   **Groovy is hard to maintain**: This isn't a widely popular script language, so it is harder to find experts to manage it and it's hard to debug due to a lack of debuggers.\n\n*   **Jenkins does not support any kind of clustering or failover**: The web UI is run on a web container known as Jenkins master, and you can only have one. For a large team of developers needing to use Jenkins all at once, that one instance needs to be very closely monitored with limited permissions.\n\nA large Jenkins plug-in environment creates silos within silos and knowledge gaps that are hard to overcome. 
What this leads to is a “throw it over the wall” team dynamic: Because the system depends on the expertise of a very limited number of people, developers have to submit code and hope their experts have the skills to manage it.\n\n## Lack of visibility keeps teams in the dark\n\nIn order for [DevOps](/topics/devops/) to thrive there needs to be an understanding of what every team is doing and clarity around processes. Unfortunately, a tool like Jenkins doesn’t necessarily facilitate this. Because users can’t see other users’ commits, they can’t visualize the SDLC as a whole. This only isolates teams even further.\n\nTeams that work within this plug-in environment often download the plug-ins they need, which makes it hard for Jenkins admins to standardize across teams. That, in turn, makes it harder for admins to manage the dependencies and maintain plug-ins properly, which can lead to more broken builds.\n\nWhile plug-ins are a common way to add functionality into a toolchain, it doesn’t address the problems of a toolchain that hinder teams trying to implement DevOps:\n\n*   Lack of visibility\n*   Knowledge gaps\n*   Work silos\n\n## Why single application CI/CD makes better DevOps\n\nAs a complete [DevOps platform](/solutions/devops-platform/) delivered as a single application, we provide a tool that covers all parts of the SDLC from one interface. 
CI and CD are just one part of the lifecycle, and by having functionality like [SCM, Issue tracking, Security testing, and Monitoring](/devops-tools/jenkins-vs-gitlab/) built right in, we’re making it easier for teams to work with DevOps best practices.\n\nIf you would like to see a demo of GitLab CI/CD and how we compare to Jenkins, and access other curated content around CI/CD, you can watch our most recent webcast.\n\n[Watch the demo.](/blog/migrating-from-jenkins/)\n{: .alert .alert-gitlab-purple .text-center}\n",[109,683,9],{"slug":1536,"featured":6,"template":686},"better-devops-with-gitlab-ci-cd","content:en-us:blog:better-devops-with-gitlab-ci-cd.yml","Better Devops With Gitlab Ci Cd","en-us/blog/better-devops-with-gitlab-ci-cd.yml","en-us/blog/better-devops-with-gitlab-ci-cd",{"_path":1542,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1543,"content":1548,"config":1553,"_id":1555,"_type":14,"title":1556,"_source":16,"_file":1557,"_stem":1558,"_extension":19},"/en-us/blog/beyond-application-modernization-trends",{"title":1544,"description":1545,"ogTitle":1544,"ogDescription":1545,"noIndex":6,"ogImage":1193,"ogUrl":1546,"ogSiteName":670,"ogType":671,"canonicalUrls":1546,"schema":1547},"Beyond trends: Committing to application modernization","How to overcome analysis paralysis and take your digital transformation efforts from theory to practice.","https://about.gitlab.com/blog/beyond-application-modernization-trends","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Beyond trends: Committing to application modernization\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2019-02-25\"\n      }",{"title":1544,"description":1545,"authors":1549,"heroImage":1193,"date":1550,"body":1551,"category":679,"tags":1552},[676],"2019-02-25","\n\nJust commit. What’s so hard about that? 
In truth, there’s a reason why commitment phobia is a punchline and it’s tough to settle on a place to go to dinner, let alone make a critical choice like when or [how to start the application modernization process](/blog/application-modernization-best-practices/).\n\nFor starters, there are so many questions to ask. For example:\n\n  1. What is the status quo of each software initiative?\n  1. Which applications are driving value for the business? Which aren’t?\n  1. When and how should I break my monolith into microservices? What’s the risk?\n  1. Should I move to the cloud – private, public, hybrid?\n  1. Everyone is talking about containers and Kubernetes, do I need this?\n\nThis is by no means an exhaustive list, but a sample of what might come up when considering where and how to start a digital transformation journey. Questions, buzzwords, and trends abound, and it can be easy to get trapped by analysis paralysis until enough time has gone by that indecision has become the decision.\n\nAccording to [Forrester’s Predictions 2019](https://go.forrester.com/blogs/tag/predictions-2019/), 25 percent of firms will decelerate digital efforts in 2019. For many organizations, slowing the pace of innovation directly results in lost market share due to more nimble competitors entering their space.\n\n> “In 2019, digital transformation moves from super-wide enterprise efforts to a pragmatic, surgical portfolio view of digital investments with the goal of making incremental and necessary changes to operations.” – Forrester Predictions 2019\n\nThe key to starting and committing to the application modernization process is to start small and scale up as you learn. Following trends is not going to bring the organizational change needed for a successful digital transformation. It takes practical, incremental, and iterative progress.\n\nHere are a few practical steps for getting started:\n\n## 1. 
Start small with a small team or innovation group and scale up from there.\n\nTrying to make a decision on how to proceed with digital transformation across your entire organization is a monumental task. You risk introducing a lot of variable change all at once that can turn chaotic if not managed well. Starting with a small team or innovation group reduces the stress and minimizes the initial impact of getting started. [Behavioral science experts call this the “pick one and go” method](https://bsci21.org/9-tips-to-avoid-paralysis-by-analysis/) for overcoming analysis paralysis. Essentially, if you are overwhelmed or unsure about all of your options, just pick one and try it. Collect feedback, evaluate the outcome, iterate, and scale up from there.\n\nWhen choosing a team or developing an innovation group, avoid thinking along legacy lines which divide teams by stages of the software lifecycle. Think about building a cross-functional team of 8–12 people who can focus on developing the culture, process, and tools needed to continuously deliver software.\n\n## 2. Make smaller changes.\n\nKeep in mind that the impetus for digital transformation and, more specifically, application modernization, is driven from a business need to deliver value to customers faster. So, making smaller changes to release faster is the single most important change you can make.\n\nAdopt the mindset: what is the smallest possible change I can make to improve something, and how do I get it out as quickly as possible? At GitLab, we call this the [minimally viable change (MVC)](/handbook/product/product-principles/#the-minimal-viable-change-mvc), and it’s what allows us to ship nearly anything within a single release. This is especially important when approaching legacy software. 
If you start making a ton of big changes over a few weeks, the risk of breaking something and not understanding what change caused the error grows exponentially with every change.\n\nWith an MVC mindset, you can experiment with what works best without risking downtime. Smaller changes are easier to review, understand, and roll back if necessary.\n\n## 3. Prioritize mastering continuous delivery and deployment (CD).\n\nYou have your team assembled, you’ve made MVC your mantra, and now it’s time to establish a clear goal. If you’re just [starting down the application modernization road](/blog/application-modernization-examples/), chances are that you don’t quite know what strategy is going to work for your organization yet (that’s what the innovation group is for!). What you do know is that you need to be able to ship features to production faster while maintaining stability and security. By prioritizing understanding your current deployment pipeline and how to [automate to achieve continuous delivery](/topics/continuous-delivery/), you discover how the underlying infrastructure needs to change.\n\nAuthor Gary Gruver outlines this philosophy in his book, [\"Starting and Scaling DevOps in the Enterprise\"](/resources/scaling-enterprise-devops/). He writes:\n\n> It is my personal experience that creating, documenting, automating, and optimizing deployment pipelines in large software/IT organizations is key to improving their efficiency and effectiveness. – Gary Gruver\n\nStart with a single application and document how a change goes from idea all the way to production and monitoring. This will give you a good understanding of how it’s currently operating, what its dependencies are, and how you can start to decouple.\n\nFinally, the end goal is to enable teams with [fully automated CI/CD pipelines](https://docs.gitlab.com/ee/topics/autodevops/) so developers can get their code to production faster. 
Taking both a cultural and technological approach to change is needed to adopt DevOps methodology.\n\nAre you ready to commit to your digital transformation journey? [Get inspired and learn how Ask Media Group modernized their architecture and development with microservices, containers, and Kubernetes](/webcast/cloud-native-transformation/).\n",[855,109,9],{"slug":1554,"featured":6,"template":686},"beyond-application-modernization-trends","content:en-us:blog:beyond-application-modernization-trends.yml","Beyond Application Modernization Trends","en-us/blog/beyond-application-modernization-trends.yml","en-us/blog/beyond-application-modernization-trends",{"_path":1560,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1561,"content":1567,"config":1573,"_id":1575,"_type":14,"title":1576,"_source":16,"_file":1577,"_stem":1578,"_extension":19},"/en-us/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security",{"title":1562,"description":1563,"ogTitle":1562,"ogDescription":1563,"noIndex":6,"ogImage":1564,"ogUrl":1565,"ogSiteName":670,"ogType":671,"canonicalUrls":1565,"schema":1566},"Biden administration updates software supply chain security requirements","GitLab's One DevOps Platform can help agencies comply with government requirements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667852/Blog/Hero%20Images/eosecurity.jpg","https://about.gitlab.com/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Biden administration accelerates software supply chain security expectations a year into Executive Order\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-05-12\"\n      
}",{"title":1568,"description":1563,"authors":1569,"heroImage":1564,"date":1570,"body":1571,"category":769,"tags":1572},"Biden administration accelerates software supply chain security expectations a year into Executive Order",[1454],"2022-05-12","\n\nPresident Joe Biden last year on May 12th signed [Executive Order 14028 \"Improving the Nation’s Cybersecurity\"](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/), which called on public and private sector organizations to improve the nation’s cybersecurity with “bold change” and “significant investments”. “Incremental improvements will not give us the security we need,” the EO states. Since then, the administration has only increased the pressure on agencies, forcing them to take a hard look at their software supply chains and justify their application development decisions, including how they use open source code, test their code, and grant permissions.\n\n“The federal government has accelerated its expectations for software supply chain security compliance, yet some organizations are still trying to understand how to broadly and proactively protect their software development,” says [Joel Krooswyk](https://gitlab.com/jkrooswyk), Senior Manager of Solutions Architecture at GitLab. “Agencies and their vendors have been focused on policy management and role-based access, but the federal government wants to go deeper and know where code is coming from and how to better secure it. They are quickly moving down the supply chain.” \n\nThe interest in the origins of software code stems from the complexity of cyberattacks such as that [carried out on SolarWinds](/blog/what-the-solarwinds-attack-can-teach-us-about-devsecops/), as well as the ongoing [log4j](/blog/use-gitlab-to-detect-vulnerabilities/) and Spring4Shell vulnerabilities. 
“Intentionally malicious contributions can inject code that is literally opening the doors to hackers,” Krooswyk says. “However, agencies and vendors can’t just stop utilizing open source software and microservices. They need the ingenuity of the open source community.” GitLab is a proponent of open source and believes [everyone can contribute](/company/mission/).\n\nThe Biden administration, through its frameworks and mandates, is simply saying, 'we have to keep a better eye on that,' especially as more organizations assume a cloud-first posture, according to Krooswyk.\n\nFor example, earlier this year, the National Institute of Standards and Technology (NIST) published the Software Security Development Framework (SSDF) 1.1, which offers guidance on how to [create tighter controls throughout the software development lifecycle](/blog/comply-with-nist-secure-supply-chain-framework-with-gitlab/).\n\nThe SSDF 1.1 framework recommends: \n- organizations should be prepared by reviewing permissions\n- all components of software should be safe from tampering and unauthorized access\n- software should be produced with minimal security vulnerabilities in its releases\n- organizations should be able to quickly and sufficiently respond to vulnerabilities \n\n## Code sourcing\n\nThe next phase in the federal government’s move to secure the software supply chain will be to [require reporting and/or attestation](/blog/securing-the-software-supply-chain-through-automated-attestation/).\n\n“Agencies and their vendors are being asked if their software is justifiably built using properly sourced code. As a result, organizations may have to explain why they chose to use code from non-mainline repositories,” Krooswyk says.\n\nFor instance, if a DevOps team chooses code from a non-mainline repository originating in China, they will have to attest to why they did that over sourcing from a mainline repository. 
The same idea applies to pulling clean containers and not repeatedly using those plagued with existing vulnerabilities, according to Krooswyk.\n\nHe believes these questions will all be rolled up into a Cybersecurity & Infrastructure Software Agency (CISA) mandate for a [software bill of materials](https://www.cisa.gov/sbom) (SBOM), which is a list of ingredients that make up software components. “The SBOM will show the list of contributors, known vulnerabilities, results of dependency scans on open source, and more,” he says. “The Biden administration, NIST, and CISA are all in alignment on the need for more consistent software security attestation.”\n\n## How to prepare\n\nWhile some agencies, like the U.S. Department of Defense, might be on the cutting edge of these mandates, smaller agencies or those with more legacy infrastructure and practices might require more effort to be able to comply. “If your development, operations, and security processes aren’t transparent or fully documented and if your scanning is still manual, then these new requirements could be a roadblock,” Krooswyk says. 
“The administration is only going broader in terms of the scope of mandates and more specific with security requirements as time progresses to plug all the security holes, meaning more regulations and further compliance.”\n\nGitLab believes some of the long-term asks expected to come from the government may include:\n- bake security in, don’t bolt it on\n- ensure scanning is top of mind\n- maintain zero-trust permission models and source code management controls\n- any open source software used should have known origins and support SBOM generation, verifiable by dependency scanning\n- purchase secure commercial off-the-shelf software that complies with all security and labeling requirements from standards bodies\n\nGitLab’s One DevOps Platform can help organizations answer this request for software supply chain security compliance through visibility and transparency into processes, verifiable compliance, zero-trust user management, and templated security automation. “While we are helping organizations with cloud adoption and infrastructure modernization, we’re doing so in such a way as to not compromise on risk or security, providing end-to-end traceability and step-by-step auditability from issue creation through deployment,” he says.\n\nGitLab has a distinct set of features that make enabling NIST frameworks and attesting to code sourcing decisions easier:\n- [SBOM creation](https://docs.gitlab.com/ee/user/application_security/dependency_list/#dependency-list) in a standardized format \n- [Security dashboards](https://docs.gitlab.com/ee/user/application_security/security_dashboard/)\n- [Vulnerability reports and remediation](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/)\n- [Pipeline frameworks and compliance](https://docs.gitlab.com/ee/user/project/settings/#compliance-frameworks)\n- [Security scanning breadth of offering](https://docs.gitlab.com/ee/user/application_security/) from SAST and DAST to fuzz testing \n\nAs the EO states, 
incremental improvements are not enough to properly secure software. To meet the totality, speed, and sophistication of the administration’s demands for cybersecurity protections, consider adopting GitLab’s One DevOps Platform.\n\n",[9,875,682],{"slug":1574,"featured":6,"template":686},"biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security","content:en-us:blog:biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security.yml","Biden Administration Celebrates 1 Year Anniversary Of Eo By Accelerating Software Supply Chain Security","en-us/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security.yml","en-us/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security",{"_path":1580,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1581,"content":1587,"config":1593,"_id":1595,"_type":14,"title":1596,"_source":16,"_file":1597,"_stem":1598,"_extension":19},"/en-us/blog/bringing-ai-gitlab-repository",{"title":1582,"description":1583,"ogTitle":1582,"ogDescription":1583,"noIndex":6,"ogImage":1584,"ogUrl":1585,"ogSiteName":670,"ogType":671,"canonicalUrls":1585,"schema":1586},"GitLab and Tabnine: AI-powered code completion for GitLab repositories","Development teams can get a custom AI model based on their private code that enables knowledge sharing, reduced technical debt, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682249/Blog/Hero%20Images/blog_2757.png","https://about.gitlab.com/blog/bringing-ai-gitlab-repository","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Tabnine: AI-powered code completion for GitLab repositories\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brandon Jung\"}],\n        \"datePublished\": \"2022-03-02\"\n      
}",{"title":1582,"description":1583,"authors":1588,"heroImage":1584,"date":1590,"body":1591,"category":791,"tags":1592},[1589],"Brandon Jung","2022-03-02","\n\nAs AI continues to become more ubiquitous throughout [every aspect of our lives](https://www.tabnine.com/blog/is-ai-pair-programming-really-going-to-help-me/), it should come as little surprise that programming has picked it up as a tool to help make developers more productive. Tabnine is integrating with GitLab to bring Tabnine's AI-powered code completion technology to GitLab repositories to improve the accuracy and speed of code development.\n\nWe believe that increased development velocity improves the developer’s working experience, accelerates feature release cadence, and enables teams to respond faster to market opportunities. Users can now get a custom AI model based on their private code that enables: \n\n- Knowledge sharing\n- Reduced technical debt\n- Faster code reviews\n- Faster onboarding and time to value\n\nThe value of a custom model is about helping a specific team with a specific mission be more productive. A team comes in many forms from the most simple [two pizza box team](https://docs.aws.amazon.com/whitepapers/latest/introduction-devops-aws/two-pizza-teams.html) to a large software company with hundreds of internal developers as well as thousands of external developers who contribute to a large shared [open source code base](/community/contribute/). What all these teams have in common is that they have a shared interest in a common code base. This code base for any digital company is one of the most important strategic assets and anything that helps them build it faster and more consistently requires serious consideration.\n\nGitLab has a robust platform for hosting code for private teams, so it is natural that we wanted to make it easier for teams to bring their development models together. Developers can now automate the creation of a custom model based on their private code. 
The process is outlined below and is seamless for the user as Tabnine will build, validate, and upload the private model for the whole team. New developers can now be added to the team and will immediately receive custom suggestions based on the codified best practices of the team. \n\nThis is the first of ongoing work that Tabnine is doing to support developers together with GitLab and we look forward to getting your feedback on how we can make it better for you individually and for your team.\n\nHere's how to get started: \n\n1. As a Tabnine for Teams user, login to [AI Code Completions for Developers & Teams](https://app.tabnine.com/profile/) \n2. Navigate to the “Team AI” tab\n3. Connect to your GitLab repositories\n4. Tabnine will build, test, and upload your private team model\n5. Enjoy your personalized Tabnine AI assistant\n\n![Getting started](https://about.gitlab.com/images/blogimages/tabnine1.png){: .shadow}\n\nThe GitLab partnership represents Tabnine's latest step towards the goal of an end-to-end development platform supporting all developers regardless of working environments, coding languages, or IDEs. 
[Share your feedback with Tabnine](https://forms.gle/vCHK5QRoyR5xt6Jg8) on our AI-powered code completion technology.\n\n",[9,726,771],{"slug":1594,"featured":6,"template":686},"bringing-ai-gitlab-repository","content:en-us:blog:bringing-ai-gitlab-repository.yml","Bringing Ai Gitlab Repository","en-us/blog/bringing-ai-gitlab-repository.yml","en-us/blog/bringing-ai-gitlab-repository",{"_path":1600,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1601,"content":1607,"config":1613,"_id":1615,"_type":14,"title":1616,"_source":16,"_file":1617,"_stem":1618,"_extension":19},"/en-us/blog/built-in-ci-cd-version-control-secret",{"title":1602,"description":1603,"ogTitle":1602,"ogDescription":1603,"noIndex":6,"ogImage":1604,"ogUrl":1605,"ogSiteName":670,"ogType":671,"canonicalUrls":1605,"schema":1606},"The market figured out GitLab’s secret","Why we decided to combine version control with CI, and the rise of the single application.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663648/Blog/Hero%20Images/gitlab-joins-cd-foundation.jpg","https://about.gitlab.com/blog/built-in-ci-cd-version-control-secret","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The market figured out GitLab’s secret\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2019-08-08\",\n      }",{"title":1602,"description":1603,"authors":1608,"heroImage":1604,"date":1610,"body":1611,"category":679,"tags":1612},[1609],"Sid Sijbrandij","2019-08-08","\n\nThere’s a movement in the DevOps industry and the world right now: to do more in a simple way that inspires us to innovate. GitLab started this trend in the DevOps space by simplifying the delivery of code by combining GitLab CI and [GitLab version control](/topics/version-control/). 
We didn't originally buy into the idea that this was the right way to do things, but it became our secret capability that we’ve doubled down on.\n\n## Let’s combine applications\n\nThe story starts with [Kamil Trzciński](/company/team/#ayufanpl), now a distinguished engineer at GitLab. Soon after Kamil came to work for GitLab full time, he began talking with me and my co-founder, [Dmitriy Zaporozhets](/company/team/#dzaporozhets), suggesting that we bring our two projects together – GitLab Version Control and GitLab CI, making it into one application. Dmitriy didn’t think it was a good idea. GitLab version control and CI were already perfectly integrated with single sign-on and APIs that fit like a glove. He thought that combining them would make GitLab a monolith of an application, that it would be disastrous for our code quality, and an unfortunate user experience. After time though, Dmitriy started to think it was the right idea as it would deliver a seamless experience for developers to deliver code quickly.\n\nAfter Dmitriy was convinced, they came to me. I also didn’t think it was a good idea. At the time I believed we needed to have tools that are composable and that could integrate with other tools, in line with the Unix philosophy. Kamil convinced me to think about the efficiencies of having a single application.\n\n>“Well, if you don’t believe that it’s better for a user, at least believe it’s more efficient for us, because we only have to release one application instead of two. Efficiency is in our values.” - Kamil Trzcinski, distinguished engineer at GitLab\n\n## Realizing the future of DevOps is a single application\n\nThat made sense to me and I no longer stood in their way. The two projects merged and the results were beyond my expectations. The efficiencies that were so appealing to us, also made it appealing to our customers. 
We realized we stumbled on a big secret because nobody believed that the two combined together would be a better way of continuously delivering code to market. We doubled down on this philosophy and we started doing [continuous delivery](/topics/continuous-delivery/).\n\nFrom that day on, I saw the value of having a single application. For example, a new feature we are implementing is auto-remediation. When a vulnerability comes out, say a heart bleed, GitLab will automatically detect where in your codebase that vulnerability exists, update the dependency, and deliver it to your production environment. This level of automation would be hard to implement without being in a single application. By combining the projects we unified teams – helping them realize the original intent of DevOps – and that is magical to see.\n\n## The market validates our secret\n\nAnd while we bet on this philosophy the industry is now seeing it as well. In September of 2015 we [combined GitLab CI and GitLab version control](/releases/2015/09/22/gitlab-8-0-released/) to create a single application. By March of 2017, Bitbucket also realized the advantages of this architecture and [released Pipelines as a built-in part of Bitbucket](https://dzone.com/articles/bitbucket-adds-pipelines). In 2018, [GitHub announced Actions](https://techcrunch.com/2018/10/16/github-launches-actions-its-workflow-automation-tool/) with CI-like functionality built into a single application offering. In the last six months, [JFrog acquired Shippable](https://techcrunch.com/2019/02/21/jfrog-acquires-shippable-adding-continuous-integration-and-delivery-to-its-devops-platform/) and [Idera acquired Travis CI](https://hub.packtpub.com/idera-acquires-travis-ci-the-open-source-continuous-integration-solution/), showing a consolidation of the DevOps market and a focus on CI. 
The market is validating what we continually hear from our users and customers: that a simple, single DevOps application meets their needs better.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/MNxkyLrA5Aw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWe hope you will continue to join us in our effort to bring teams together to innovate. [Everyone can contribute](/company/mission/#mission) here at GitLab and as always, we value your feedback, thoughts, and contributions.\n\nWant to hear me talk through the origin story? Listen to the [Software Engineering Daily podcast](https://softwareengineeringdaily.com/2019/03/15/gitlab-with-sid-sijbrandij/) where I talk about combining GitLab CI and GitLab Version Control.\n",[109,9],{"slug":1614,"featured":6,"template":686},"built-in-ci-cd-version-control-secret","content:en-us:blog:built-in-ci-cd-version-control-secret.yml","Built In Ci Cd Version Control Secret","en-us/blog/built-in-ci-cd-version-control-secret.yml","en-us/blog/built-in-ci-cd-version-control-secret",{"_path":1620,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1621,"content":1627,"config":1632,"_id":1634,"_type":14,"title":1635,"_source":16,"_file":1636,"_stem":1637,"_extension":19},"/en-us/blog/business-impact-ci-cd",{"title":1622,"description":1623,"ogTitle":1622,"ogDescription":1623,"noIndex":6,"ogImage":1624,"ogUrl":1625,"ogSiteName":670,"ogType":671,"canonicalUrls":1625,"schema":1626},"The business impact of CI/CD","How a good CI/CD strategy generates revenue and keeps developers happy.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670016/Blog/Hero%20Images/modernize-cicd.jpg","https://about.gitlab.com/blog/business-impact-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The business impact of CI/CD\",\n     
   \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"},{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2019-06-21\",\n      }",{"title":1622,"description":1623,"authors":1628,"heroImage":1624,"date":1629,"body":1630,"category":679,"tags":1631},[788,723],"2019-06-21","\n\n[Continuous integration and delivery](/solutions/continuous-integration/) helps [DevOps](/topics/devops/) teams ship higher quality software, faster. But is all [CI/CD](/topics/ci-cd/) created equal? What does successful CI/CD implementation look like and how do you know you’re on the right track?\n\nIn this four-part series, we talk about modernizing your CI/CD: Challenges, impact, outcomes, and solutions. In [part one](/blog/modernize-your-ci-cd/), we focused on common CI/CD challenges. Today, we’ll talk about the revenue impact of a poor or non-existent CI/CD strategy.\n\nIf these problems hit a little too close to home, stay tuned for part three where we dive deeper into what organizations gain when they implement better CI/CD.\n\n## What are the business impacts of bad CI/CD?\n\n### 1. A large portion of IT budget is spent on undifferentiated engineering\n\nOpportunity costs play a much larger role in the development process than we realize. Organizations can only afford so many engineers at one time, and systems that require extensive maintenance means fewer engineers are working on revenue-generating projects. This will lead to slower innovation and slower growth in the long term. Undifferentiated engineering means too many individuals are having to focus on one thing – maintenance.\n\n### 2. Delayed (and even unrealized) revenue\n\nThis is the impact of lost opportunity costs. When there are too many dependencies, too many handoffs, and too many manual tasks, it causes delays between when code is written and when the business gets value from that code. In worst cases, code is written and the business never gets any value from it at all. 
Code can sit in limbo waiting for others to manually test it, and by the time it’s finally reviewed it’s already irrelevant. The opportunity cost essentially doubles: Engineers were paid to work on code that never deployed, and the business loses out on revenue the code could have generated.\n\n### 3. Lower developer productivity, lower developer happiness, and less reliable software\n\nDowntime = lost revenue. To avoid that dreaded downtime, developers are spending time working on infrastructure and configuration, and they’re also not spending that time delivering business logic. In both cases, they’re being less productive and working outside of their core competencies. Developer hiring and retention will inevitably suffer. Uptime and resiliency are also affected because people who aren’t domain experts are put in charge of determining infrastructure. It’s a self-fulfilling prophecy.\n\n## What does it look like if a magic wand were to solve it today?\n\n### 1. More engineers are working on the app instead of maintenance\n\nThe organization has the right amount of developers devoted to driving business value and spends more time on innovation instead of undifferentiated heavy lifting. Less of the budget is spent on activities that don't generate revenue.\n\n### 2. Developers see their code in production quickly\n\nInfrastructure and deployment are [fully automated](https://docs.gitlab.com/ee/topics/autodevops/). Everyone loves to see the output of their work, developers especially, and the business gets to see the benefits of this code right away. Deploying smaller chunks of code is less risky when developers can take advantage of test automation, so they have less overhead and coordination with a QA team forced to test manually.\n\n### 3. Developers are focused on solving business problems\n\nCode is written to be environment and cloud agnostic. Development teams own the uptime of their own services, but they are fully supported by the ops team. 
Ops owns the infrastructure, dev owns the service, and both teams can work according to their strengths.\n\nSolving these problems doesn’t require waving a wand or any magic at all. Modernizing your architecture and embracing CI/CD is what other companies are doing to release better software, faster. When organizations implement CI/CD best practices, they get the added benefit of generating more revenue in the long run.\n\nSo what makes “good” CI/CD? We invite you to compare GitLab CI/CD to other CI tools and see why we were rated #1 in the Forrester CI Wave™.\n\n[Explore GitLab CI/CD](/solutions/continuous-integration/)\n{: .alert .alert-gitlab-purple .text-center}\n\nPhoto by [Jungwoo Hong](https://unsplash.com/photos/cYUMaCqMYvI?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/arrow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,109,683],{"slug":1633,"featured":6,"template":686},"business-impact-ci-cd","content:en-us:blog:business-impact-ci-cd.yml","Business Impact Ci Cd","en-us/blog/business-impact-ci-cd.yml","en-us/blog/business-impact-ci-cd",{"_path":1639,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1640,"content":1646,"config":1651,"_id":1653,"_type":14,"title":1654,"_source":16,"_file":1655,"_stem":1656,"_extension":19},"/en-us/blog/cadence-is-everything-10x-engineering-organizations-for-10x-engineers",{"title":1641,"description":1642,"ogTitle":1641,"ogDescription":1642,"noIndex":6,"ogImage":1643,"ogUrl":1644,"ogSiteName":670,"ogType":671,"canonicalUrls":1644,"schema":1645},"Cadence is everything: 10x engineering organizations for 10x engineers","GitLab CEO and co-founder Sid Sijbrandij on the importance of cadence in engineering 
organizations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671909/Blog/Hero%20Images/Athlinks_running.jpg","https://about.gitlab.com/blog/cadence-is-everything-10x-engineering-organizations-for-10x-engineers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Cadence is everything: 10x engineering organizations for 10x engineers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2022-11-03\",\n      }",{"title":1641,"description":1642,"authors":1647,"heroImage":1643,"date":1648,"body":1649,"category":791,"tags":1650},[1609],"2022-11-03","\nI confess: Although I don’t believe in Bigfoot or Nessie and do believe the moon landings happened, I am convinced that despite the current orthodoxies, [10x engineers](https://svdictionary.com/words/10x-engineer) very much exist and are a major positive force for the industry, and potentially your organization.  If you can find one, convince her to work for you and keep her happy and productive (but I repeat myself).\n\nAlas, finding one is not easy, and no, job adverts stating “We only hire the best” don’t help. However, what you can do is structure your development organization in a way to make such a person productive.  \n\nFortunately, making a 10x developer productive is pretty much the same, as you need to make your development organization productive for everyone, just dialed up to 11, particularly because an inefficient organization will affect a more efficient developer much more dramatically.\n\nUnfortunately, this state appears to be neither natural nor stable.\n\n[Effective organizations are unnatural](https://twitter.com/paulg/status/1556341452740775936?s=21&t=67hekF4Sus5tPryLdZmCHA). The natural state of organizations is bureaucracy and turf wars, and once deprived of effective leadership they revert to their natural state with shocking speed. 
Similar to organizations in general, development organizations naturally tend toward inefficiency.\n\nMore specifically, development organizations tend toward ever-lengthening cycle times just as much as organizations in general tend toward bureaucracy.  In both cases, this is always for good reasons.  This is really important:  If this tendency toward lengthening cycle times were just stupidity or laziness, it would be significantly easier to counter.  Anthropologist and historian [Joseph Tainter makes a similar point](https://www.youtube.com/watch?v=JsT9V3WQiNA) about civilizations, whose ever-increasing complexity leads to their collapse.  Here as well, the complexity is not introduced willy-nilly, but as a necessary response to problems the civilization faces.  \n\n## The sky’s the limit\n\nSoftware tends to be fairly abstract, but the principles of short cycle times are just applicable in more down-to-earth disciplines, or should I say down-to-air?  First, one of my favorites, the story of how Paul MacCready created the Gossamer Condor to win the first Kremer Prize for human-powered flight.  More recently, Elon Musk’s SpaceX has been out-iterating NASA and the legacy spaceflight companies with results that would have seemed miraculous a couple of decades ago.  Both examples show that while other factors are obviously more important, cadence actually dominates them in short order.\n\nMacGready had come into a bit of debt due to securing a friend’s business loan, and set his eyes on the first Kremer prize for human-powered flight. This had gone unclaimed for 17 years, but not for lack of trying: There had been over 50 official attempts and all failed.  It was a Very Hard Problem we couldn’t solve so it obviously required the most aerodynamically efficient and sophisticated designs possible.  
So that’s what people did, and when their sophisticated plane inevitably crashed — after all they were working on the edge of the possible — it took them a year or more to rebuild it.\n\nMacGready approached this from the opposite angle:  He would concentrate on a plane that didn’t have to be so efficient and sophisticated, but instead would fly low and slow, be light and very repairable, aiming for 12 crashes a day. The Gossamer Condor was built out of some lightweight aluminum struts and mylar foil and could usually be repaired with Scotch tape. It was a weird contraption that didn’t look like it could fly.\n\n![The Condor](https://about.gitlab.com/images/blogimages/10x.png)\n\nWithin a few months, the team had accumulated more flights, and more crashes, than the rest of the competition combined. With all that experience, they then also understood the actual problems better than anyone else, for example, how to steer, and soon won the prize, which involved flying a mile in a circle eight.  \n\nThis wasn’t a [one-off fluke](https://www.youtube.com/watch?v=FvmTSpJU-Xc&t=3348s) either: The team went on to win the next Kremer prize as well, crossing the English Channel, then pioneered solar flight and broke the SR-71’s altitude record. The company that came out of the effort nowadays makes drones, including the successful Switchblade drones for the U.S. military that have recently been sent to help in the Ukraine conflict.\n\n## The sky’s not the limit\n\nMore recently, SpaceX has been demonstrating the efficacy of iterative development, first with the Falcon 9 rocket and now with the Starship program. 
While the latter hasn’t flown to space yet, and so may still fail completely, both the aim and the achievements so far have been breathtaking, particularly compared to NASA’s Space Launch System (SLS), which was started around the same time and is designed to have similar capabilities, lifting around 100 tons to low earth orbit.\n\nThe NASA SLS is a cost-reduced version of the Constellation program, which was canceled early after quickly outgrowing its projected $150 billion dollar budget.  The reduced development cost of the SLS (so far $23 billion in 10 years) has been achieved by reusing not just designs, but also actual parts from the Space Shuttle program.  Not just the solid rocket boosters, but some of the main engines are the actual parts that flew on shuttles and had been mothballed by NASA.  Despite this part reuse, launches of the fully expendable rocket are predicted to cost somewhat upward of $1 billion per pop.  As of Oct. 20, there have been no flights of any of the hardware (except on space shuttles), and the first test launch scheduled for Nov. 26th will fly the full stack as designed.\n\nIn comparison, the Starship program is estimated to have cost $3 billion so far, with estimates of total development costs varying between $5 billion and $10 billion. This is for a completely new rocket, pretty much unlike any that have come before, designed for full reusability and same-day turnaround after refueling, completely new methane-burning engines, assembly-line production using relatively inexpensive materials and a projected cost target of $10 million per launch. If they work as advertised, just a few Starships could turn the entire launch capacity of planet Earth thus far into a footnote, a rounding error, and they plan to build a thousand of them. 
That’s why they’re building a factory for making them.\n\nIt’s anyone’s guess whether all this launch capacity, at costs two or more magnitudes lower than currently possible, is really for making humanity multiplanetary by establishing a Mars colony or “just” for making space-based production and asteroid mining feasible.\n\nWhen asked, [Elon Musk put it quite simply](https://www.youtube.com/watch?v=E7MQb9Y4FAE&t=333s):\n\n_“Any given technology development is how many iterations do you have and what’s your time and progress between iterations.”_\n\nThe more quickly you can iterate, the more iterations you have available.  But doesn’t iterating more quickly make the progress between iterations correspondingly less, canceling the effect?  Surprisingly, that turns out not to be the case.  Elon Musk again:\n\n_“So if you have a high production rate, you can have a lot of iterations. You can try  lots of different things, and it’s OK if you blow up an engine because you’ve got a number of engines coming after that.  If you have a small number of engines then you have to be much more conservative, because you can’t risk blowing them up.”_\n\nThe higher iteration rate allows you to take more risks, which in turn allows you to push the boundaries more and thus gather more relevant feedback in each iteration, at the same time that the reduced time frame reduces what you can do. So there will be more failures. For example, engines blowing up or planes crashing.  But as long as the failures provide the information they were supposed to provide, and the individual failure modes aren’t fatal, they aren’t actually failures. You obviously don’t want to be cavalier about this, but accepting that risk allows you to push much farther per iteration.  
Musk also mentioned that as one of the main problems of the Space Shuttle program:  They couldn’t afford to have one blow up because even the first flight was manned.\n\n“A high production rate solves many ills,” he says.\n\nIn software, the production rate is the iteration rate.  If you have lots of iterations, it’s OK if one of them was a potentially high-value experiment that didn’t pan out.  If you have one iteration per year, you are less likely to want to take that risk, and your reluctance will be justified. The willingness and ability to take risks is captured in the Extreme Programming (XP) [value of ‘courage.”](http://xp.c2.com/ExtremeValues.html)\n\n## Compound interest and experience\n\nThe reason this works out is mathematical.  If you iterate and actually use the feedback the iteration gives you to improve, you will improve a little bit each time because you will have learned something.  For simplicity’s sake, let’s assume an improvement of 5% per iteration.  This is like compound interest, and while it starts slow, once it ramps up, it gives outsize returns, like any exponential.\n\nImprove 2% per iteration, and after three iterations, you will have improved by 6%, which is essentially the same as a linear improvement.  After 200 iterations, however, and whereas the linear approach will have improved by a respectable factor of 4, the iterative approach will have improved by more than 50x.\n\nApart from the purely mathematical, there is also the human factor:  When we do things over and over again, we really start to figure out how it works. We develop an intuition.\n\n## What the science says\n\nThe simplistic mathematical function is obviously not an accurate model of the real world, but the science actually has concluded that higher iteration rates are the one most important factor for the output of software development teams, at least according to the researchers.  
These findings have been published in the book “[Accelerate](https://itrevolution.com/product/accelerate/)\" by Nicole Forsgren, Jez Humble and Gene Kim.  The authors have since moved to Google as the DevOps Research and Assessment (DORA) team and make their [findings available here](https://cloud.google.com/blog/products/devops-sre/announcing-dora-2021-accelerate-state-of-devops-report).  \n\nIn short, they find that performance of software teams correlates strongly with cycle times, with the lowest- performing teams having cycle times measured in months, medium performers in weeks, good performers in days and excellent performers in hours. There is also good evidence for the causality going for cycle times to performance and not the other way around.\n\nBut there’s a deeper connection, because the method of iterating on real-world feedback is really just the scientific method, no more, no less.  It is somewhat surprising that in the field of software, we still often consider the scientific method as unruly and dangerous “cowboy coding,” and instead advocate for what is really little different from pre-science scholasticism as the proper approach to creating software.\n\nTo help us also be more scientific and data driven, the DORA team created metrics, called the [DORA metrics](https://cloud.google.com/blog/products/devops-sre/using-the-four-keys-to-measure-your-devops-performance). These are the following:\n\n- Deployment frequency — How often an organization successfully releases to production\n- Lead time for changes — The amount of time it takes a commit to get into production\n- Change failure rate — The percentage of deployments causing a failure in production\n- Time to restore service — How long it takes an organization to recover from a failure in production\n\n## The dangers of dead reckoning\n\nIn reality, it is much more dangerous to stay away from actual code and real feedback from users for any length of time.  
For example, ships before GPS used essentially two methods for navigation: dead reckoning and external fixes.  With dead reckoning, you took a known position, added the course speed and known currents over time to come up with a new position.  However, despite the best equipment and methods, this method always introduces some error because the external factors cannot be known with certainty. And what’s worse, just like improvements accumulate and build on each other over time, so do these errors, making the position ever more uncertain over time.\n\nWhen you are in the middle of the ocean, that might not be a huge problem, but close to shore it can be deadly, which is why the amphibious ships of the Royal Navy were required to use position fixing in intervals of a few minutes. With position fixing, you use the actual external environment, landmarks that you can triangulate to determine your position (and of course GPS is just a version of this, except using satellites for the fix instead of landmarks).  This means you aren’t guessing where you are, you know where you are, and every new measurement clears the slate of any errors; there is no accumulation.\n\nSlides don’t crash, and Jira is patient. You can have 100 tasks that are marked as 99% completed in your tracker of choice and still never ship anything to customers.\n\nReality is that which, when you stop believing in it, doesn’t go away, said science fiction writer Phillip K. 
Dick, in [How to Build a Universe that Doesn’t Fall Apart Two Days Later](https://deoxy.org/pkd_how2build.htm).\n\nIn Part 2, The Process Equation, we will look at overcoming the forces that tend to push software engineering organizations toward higher cycle times and lower cadence.\n",[813,9],{"slug":1652,"featured":6,"template":686},"cadence-is-everything-10x-engineering-organizations-for-10x-engineers","content:en-us:blog:cadence-is-everything-10x-engineering-organizations-for-10x-engineers.yml","Cadence Is Everything 10x Engineering Organizations For 10x Engineers","en-us/blog/cadence-is-everything-10x-engineering-organizations-for-10x-engineers.yml","en-us/blog/cadence-is-everything-10x-engineering-organizations-for-10x-engineers",{"_path":1658,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1659,"content":1665,"config":1670,"_id":1672,"_type":14,"title":1673,"_source":16,"_file":1674,"_stem":1675,"_extension":19},"/en-us/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform",{"title":1660,"description":1661,"ogTitle":1660,"ogDescription":1661,"noIndex":6,"ogImage":1662,"ogUrl":1663,"ogSiteName":670,"ogType":671,"canonicalUrls":1663,"schema":1664},"Can an SMB or start-up be too small for a DevOps platform?","It may sound counter-intuitive but even a very small company or startup can take advantage of the power of a DevOps platform. 
Here's how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668213/Blog/Hero%20Images/innersourcing-improves-collaboration-within-an-organization.jpg","https://about.gitlab.com/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Can an SMB or start-up be too small for a DevOps platform?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-04-06\",\n      }",{"title":1660,"description":1661,"authors":1666,"heroImage":1662,"date":1667,"body":1668,"category":769,"tags":1669},[810],"2022-04-06","\n\nIf you work in an IT team of five people – or maybe you’re even a team of one – it’s easy to think your business is simply too small to use DevOps.\n\nBut that’s not the case. A start-up or small and medium-sized business (SMB) is never too small to take advantage of a DevOps platform. \n\nIn fact, DevOps is [a great fit for a lot of SMBs](https://page.gitlab.com/resources-ebook-smb-beginners-guide-devops.html), or small and medium-sized enterprises (SMEs). Here’s how to understand if it will work for your team or organization and how it could help grow your business in a competitive environment.\n\n## The size of the business isn’t the issue\n\nLet’s be clear. If you are developing software, you need a DevOps platform. Size isn’t really the issue. No matter how small your business and your tech team, if you are iterating on software features, building applications, or automating parts of your product-related systems, then you do need DevOps. DevOps will even work for a team of one.\n\nHere’s how a DevOps platform can help an SMB:\n\n### Start small to foster innovation\n\nOne of the key aspects of DevOps is that it creates a [collaborative atmosphere](/blog/collaboration-communication-best-practices/), even beyond the software and IT teams. 
Adopting a single, end-to-end DevOps platform when your company is small or your start-up is just getting off the ground will enable and encourage everyone – whether they’re in a technical role or work in accounting, sales or as a business manager – to all work together. And that will foster innovation by bringing in ideas from people in a range of demographics and business interests. And innovative ideas will help new businesses get a foot in the door and help all SMBs grow into more successful and bigger companies.\n\n### Optimize your SMB for speed\n\nTo get established in the market, start-ups and small businesses need to deliver compelling products quickly, and be able to efficiently support them. DevOps will enable your team to [move from planning to production](/blog/pipelines-as-code/) faster and with greater ease. A DevOps platform extends through the entire software development lifecycle, from planning all the way through to launching new features, conducting analysis, and gathering feedback. Simply put, DevOps will optimize your organization for speed, which is just what SMBs and SMEs need.\n\n### Use DevOps to take on the “deep pockets”\n\nAs an SMB, you likely don’t have the deep pockets and market penetration of your more-established competitors. How do you boost your odds when taking them on? One way to increase your competitiveness is to use DevOps to boost speed and efficiency as you create new products, new services, and new ways to communicate with your customers. When you can deploy innovative ideas faster than your competitors, you’ll have a definite advantage.\n\n### Decrease your workload with automation\n\nWhen you have fewer hands to take on a huge workload, you need a way to not only speed production but to ease the number of tasks you’re facing – and all the headaches that come along with them. 
The [automation that is part of a DevOps platform](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) will mean less manual work when it comes to processes like design, testing, development, deployment, and monitoring. Automation helps small teams free up time to handle all the other projects on their to-do lists. \n\n### Build security into software from the get-go\n\nWhen a company is getting started, it’s the perfect time to use DevOps to help build security into the code and processes from the very beginning. Small companies and startups need to “shift left” and focus on [security at the earliest stages](/blog/want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together/). When security is baked in from the start, you won’t have to go back in later on to fix problems that could jeopardize your customers and your business.\n\n### Use DevOps to avoid silos\n\nMaybe your company is small enough that silos aren’t a problem… yet. But as a company grows, people often naturally separate off into [silos or groups that do not communicate with or understand each other](/blog/developing-a-successful-devops-strategy/). And they definitely don’t work well together. By fostering collaboration among IT teams and even non-technical groups across the business, a DevOps platform makes it easier to keep these silos from forming in the first place, and to break them down if they do form. As companies grow from 10 employees to 100 (or more), DevOps will help an organization stay connected and collaborative as it expands.\n\n### Start early to ensure collaboration \n\nIt’s easier to create a collaborative culture from the very beginning – when a company is still a start-up or an SMB – than to overhaul a large, established organization. 
Instilling an environment of [communication and collaboration](/blog/if-its-time-to-learn-devops-heres-where-to-begin/) is less disruptive and easier to manage in a company of 10, 25, or even 100 than in a much larger and complex business that is adding hundreds of employees a year. SMBs have the “nimble” advantage, meaning that change is easier than for larger competitors. \n\nSo there is no company too small to take advantage of a DevOps platform.\n",[9,749,749],{"slug":1671,"featured":6,"template":686},"can-an-smb-or-start-up-be-too-small-for-a-devops-platform","content:en-us:blog:can-an-smb-or-start-up-be-too-small-for-a-devops-platform.yml","Can An Smb Or Start Up Be Too Small For A Devops Platform","en-us/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform.yml","en-us/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform",{"_path":1677,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1678,"content":1684,"config":1691,"_id":1693,"_type":14,"title":1694,"_source":16,"_file":1695,"_stem":1696,"_extension":19},"/en-us/blog/can-chatgpt-resolve-gitlab-issues",{"title":1679,"description":1680,"ogTitle":1679,"ogDescription":1680,"noIndex":6,"ogImage":1681,"ogUrl":1682,"ogSiteName":670,"ogType":671,"canonicalUrls":1682,"schema":1683},"Testing ChatGPT: Can it solve a GitLab issue?","We put ChatGPT to the test to see if it could contribute to GitLab. 
Here's what we learned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670171/Blog/Hero%20Images/akshay-nanavati-Zq6HerrBPEs-unsplash.jpg","https://about.gitlab.com/blog/can-chatgpt-resolve-gitlab-issues","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Testing ChatGPT: Can it solve a GitLab issue?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Coghlan\"},{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2022-12-15\",\n      }",{"title":1679,"description":1680,"authors":1685,"heroImage":1681,"date":1688,"body":1689,"category":791,"tags":1690},[1686,1687],"John Coghlan","Fatima Sarah Khalid","2022-12-15","\nChatGPT has taken the tech world by storm since its [launch on November 30](https://openai.com/blog/chatgpt/). Media coverage, front page posts on Hacker News, Twitter threads, and videos - everywhere you look, there is another story.\n\nThe [GitLab Slack](/handbook/communication/#slack) was no different. In threads across Slack channels, including those for developer evangelism, UX, the CEO, random news, and every space in between, our team was chatting about this exciting new tool.\n\nAs we got more familiar with the tool, we started to learn about numerous things it can do. Here are a few that we found:\n\n- It can write poetry about GitLab features. \n- It can write blog posts.\n- It can write unit tests.\n- It gives advice on how to use certain features of GitLab.\n- It conducts competitive analysis.\n\nThere’s quite a bit more out there, including [inventing a new language](https://maximumeffort.substack.com/p/i-taught-chatgpt-to-invent-a-language) and [building a virtual machine](https://www.engraved.blog/building-a-virtual-machine-inside/). 
We can’t recall any technology that has generated more excitement in such a short time.\n\nWe acknowledge there are ethical and licensing concerns around using AI-generated code. For the purpose of this blog post, we will focus strictly on the capabilities of ChatGPT.\n\n## Testing ChatGPT\n\nAs members of GitLab’s [Developer Relations team](/handbook/marketing/developer-relations/), where we’re focused on growing our community of contributors and evangelists, our first reaction was to think of how this tool can help our contributors. The responses to questions like “How can I get started contributing to GitLab?” were cool but didn’t move the needle. So then we asked ourselves: Can we use ChatGPT to make a contribution to GitLab?\n\nHaving already been testing the tool, we knew we’d need to look for a very specific type of issue. We started to fine-tune our search. Here are the steps we took to find a potential issue:\n\n- Visited [https://gitlab.com/gitlab-org](https://gitlab.com/gitlab-org) and pulled up all the open issues by clicking on `Issues` in the left side nav.\n- Searched for all issues with the [“good for new contributors” label](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=updated_desc&state=opened&label_name%5B%5D=good%20for%20new%20contributors&first_page_size=20). This returned 482 issues at the time of writing.\n- Set [`“Assignee = None”`](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=updated_desc&state=opened&label_name%5B%5D=good%20for%20new%20contributors&assignee_id=None&first_page_size=20) to filter out issues that have already been assigned to someone. 
This trimmed things down even more.\n\nAfter poking through a few issues, this one caught our attention: an issue that is a request to [de-depulicate identical dropdown functionality from two Vue components into a shared component](https://gitlab.com/gitlab-org/gitlab/-/issues/221293).\n\n### Getting started\n\nWe opened up a ChatGPT thread via [https://chat.openai.com/chat](https://chat.openai.com/chat). If you don’t already have an account, you’ll need to register with OpenAI.\n\nThen we started a thread with Assistant (that’s what is returned when you ask ChatGPT what is its name). We asked Assistant to create a shared Vue component called `SharedDropdownFilter` from the duplicate code in `GroupsDropdownFilter` and `ProjectsDropdownFilter` and passed the scripts of both components.\n\nChatGPT replied with:\n\n>You can create a new `SharedDropdownFilter` component that contains the duplicate code and use it in both `GroupsDropdownFilter` and `ProjectsDropdownFilter`:\n\nand provided a template to describe the use. The template provided wasn’t quite what we were looking for.\n\n```vue\n\u003Ctemplate>\n  \u003CGlDropdown\n    :aria-label=\"label\"\n    :class=\"{ disabled: loading }\"\n    :disabled=\"loading\"\n    :selected=\"selectedProjectsLabel\"\n    :variant=\"multiSelect ? 
'inline' : 'single-line'\"\n  >\n    \u003Ctemplate v-slot:menu>\n      \u003Cdiv class=\"py-4\">\n        \u003CGlSearchBoxByType\n          autofocus\n          class=\"search-box\"\n          :placeholder=\"searchPlaceholder\"\n          :type=\"searchType\"\n          v-model=\"searchTerm\"\n        />\n\n        \u003CGlDropdownHeader\n          v-if=\"selectedProjects.length && multiSelect\"\n          :class=\"{ 'd-none': !searchTerm }\"\n          class=\"js-dropdown-header\"\n        >\n          \u003Cspan>\n            {{ selectedProjectsLabel }}\n          \u003C/span>\n\n          \u003Cbutton\n```\n\nSo we tried again, this time being more specific and asking ChatGPT to “Create a shared Vue component that combines the two components `ProjectsDropdownFilter` and  `GroupsDropdownFilter`”. We passed in the full component files, including the templates.\n\nChatGPT responded with instructions, a sample Vue component, and an explanation of the functionality:\n\n> Here is a sample implementation of a shared Vue component that combines the two components `ProjectsDropdownFilter` and `GroupsDropdownFilter`:\n\n```vue\n\u003Ctemplate>\n  \u003Cdiv>\n    \u003CProjectsDropdownFilter\n      :groupId=\"groupId\"\n      :multiSelect=\"multiSelect\"\n      :label=\"label\"\n      :queryParams=\"queryParams\"\n      :defaultProjects=\"defaultProjects\"\n      @selected=\"onProjectsSelected\"\n    />\n    \u003CGroupsDropdownFilter\n      :multiSelect=\"multiSelect\"\n      :label=\"label\"\n      :queryParams=\"queryParams\"\n      :defaultGroups=\"defaultGroups\"\n      @selected=\"onGroupsSelected\"\n    />\n  \u003C/div>\n\u003C/template>\n\n\u003Cscript>\nimport { ProjectsDropdownFilter } from './ProjectsDropdownFilter.vue';\nimport { GroupsDropdownFilter } from './GroupsDropdownFilter.vue';\n\nexport default {\n  name: 'SharedDropdownFilter',\n  components: {\n    ProjectsDropdownFilter,\n    GroupsDropdownFilter,\n  },\n  props: {\n    groupId: {\n      type: 
Number,\n      required: true,\n    },\n    multiSelect: {\n      type: Boolean,\n      required: false,\n      default: false,\n    },\n    label: {\n      type: String,\n      required: false,\n      default: 'Dropdown filter',\n    },\n    queryParams: {\n      type: Object,\n      required: false,\n      default: () => ({}),\n    },\n    defaultProjects: {\n      type: Array,\n      required: false,\n      default: () => [],\n    },\n    defaultGroups: {\n      type: Array,\n      required: false,\n      default: () => [],\n    },\n  },\n  data() {\n    return {\n      selectedProjects: this.defaultProjects || [],\n      selectedGroups: this.defaultGroups || [],\n    };\n  },\n  methods: {\n    onProjectsSelected(selectedProjects) {\n      this.selectedProjects = selectedProjects;\n      this.$emit('selected', this.selectedProjects, this.selectedGroups);\n    },\n    onGroupsSelected(selectedGroups) {\n      this.selectedGroups = selectedGroups;\n      this.$emit('selected', this.selectedProjects, this.selectedGroups);\n    },\n  },\n};\n\u003C/script>\n```\n\n> In this implementation, the `SharedDropdownFilter` component is rendered as a container for the `ProjectsDropdownFilter` and` GroupsDropdownFilter` components. It passes down the `groupId`, `multiSelect`, `label`, `queryParams`, and `defaultProjects` props to the `ProjectsDropdownFilter` component, and the `multiSelect`, `label`, `queryParams`, and `defaultGroups` props to the `GroupsDropdownFilter` component. It also listens to the selected event emitted by the two components and updates its own `selectedProjects` and `selectedGroups` data accordingly. The `SharedDropdownFilter` component emits its own `selected` event with the updated `selectedProjects` and `selectedGroups` data when it receives the selected event from the `ProjectsDropdownFilter` and `GroupsDropdownFilter` components. 
This allows the parent component of the `SharedDropdownFilter` to be notified whenever the selected projects and groups are updated, so that it can take appropriate action, such as updating the data it displays.\n\nWith ChatGPT’s shared Vue component, we could start an MR for the issue, but would need to manually review the code and run tests in order to make sure the deduplication was done correctly.\n\n### Adjusting the filters\n\nIn hopes of finding an issue that ChatGPT could fully solve, this time we filtered using two labels (`good for new contributors` and `quick win`), Assignee = None, and added the keyword “refactor” to look for issues that required less net new code. At the time of writing, this [query](https://gitlab.com/gitlab-org/gitlab/-/issues/?search=refactor&sort=updated_asc&state=opened&label_name%5B%5D=quick%20win&label_name%5B%5D=good%20for%20new%20contributors&assignee_id=None&first_page_size=20) returned one issue: [a refactor for widget state tests](https://gitlab.com/gitlab-org/gitlab/-/issues/228611).\n\nAfter reviewing the issue, we opened the `/spec/frontend/vue_merge_request_widget/stores/get_state_key_spec.js` file mentioned in the issue description by navigating to that file on GitLab.com SaaS and opening the dropdown on the blue button at the top of the file frame, selecting “Open in Web IDE”, and then clicking the button to open the file in GitLab’s built-in Web IDE.\n\nWe copied the code from the file and asked ChatGPT to refactor these tests into a table of `it.each`. 
It was interesting to watch the output as ChatGPT provided an example of how we could refactor the tests using `it.each`:\n\n```javascript\nimport getStateKey from '~/vue_merge_request_widget/stores/get_state_key';\n\ndescribe('getStateKey', () => {\n  const context = {\n    mergeStatus: 'checked',\n    autoMergeEnabled: false,\n    canMerge: true,\n    onlyAllowMergeIfPipelineSucceeds: false,\n    isPipelineFailed: false,\n    hasMergeableDiscussionsState: false,\n    isPipelineBlocked: false,\n    canBeMerged: false,\n    projectArchived: false,\n    branchMissing: false,\n    commitsCount: 2,\n    hasConflicts: false,\n    draft: false,\n    detailedMergeStatus: null,\n  };\n\n  it.each([\n    [{}, null],\n    [{detailedMergeStatus: 'MERGEABLE'}, 'readyToMerge'],\n    [{detailedMergeStatus: 'DISCUSSIONS_NOT_RESOLVED'}, 'unresolvedDiscussions'],\n    [{detailedMergeStatus: 'DRAFT_STATUS'}, 'draft'],\n    [{detailedMergeStatus: 'CI_MUST_PASS'}, 'pipelineFailed'],\n    [{shouldBeRebased: true}, 'rebase'],\n    [{hasConflicts: true}, 'conflicts'],\n    [{detailedMergeStatus: 'CHECKING'}, 'checking'],\n    [{commitsCount: 0}, 'nothingToMerge'],\n    [{commitsCount: 1, branchMissing: true}, 'missingBranch'],\n    [{projectArchived: true}, 'archived'],\n  ])('should return \"%s\" when called with %p', (expected, input) => {\n    const bound = getStateKey.bind(Object.assign({}, context, input));\n    expect(bound()).toEqual(expected);\n  });\n\n  it('returns rebased state key', () => {\n    const context = {\n      mergeStatus: 'checked',\n      autoMergeEnabled: false,\n      canMerge: true,\n      onlyAllowMergeIfPipelineSucceeds: true,\n      isPipelineFailed: true,\n      hasMergeableDiscussionsState: false,\n      isPipelineBlocked: false,\n      canBeMerged: false,\n      shouldBeRebased: true,\n      projectArchived: false,\n      branchMissing: false,\n      commitsCount: 2,\n      hasConflicts: false,\n      draft: false,\n    };\n    const bound = 
getStateKey.bind(context);\n\n    expect(bound()).toEqual('rebase');\n  });\n});\n\n\n```\n\nAlong with an explanation of `it.each`:\n\n> Note that `it.each` is a utility provided by Jest that allows you to write the same test with different inputs and expected results. This can make your tests more concise and easier to read.\n\nOur next steps were to:\n- Paste the output from ChatGPT into the Web IDE.\n- Click the blue “Commit” button to commit the code.\n- Name the branch.\n- Open a merge request.\n\nAll of this can be done inside [GitLab’s Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) without leaving the browser.\n\n### Examining the results\n\nOnce we opened the merge request, a CI/CD pipeline was created to test the change. Unfortunately, the pipeline jobs for the Jest tests failed because the tests were returning `null` for each case instead of the expected results. We went back to ChatGPT to see if it could help us understand why the test was returning null for all cases. ChatGPT suggested a few different implementations to check to make sure things are defined currently:\n\n> There are a few reasons why the tests in the `it.each` function might be returning null for every case. Here are a few possibilities:\n>\n> If the `getStateKey` function is not implemented correctly, it might always return null, regardless of the input.\n> If the `getStateKey` function relies on certain variables that are not defined in the test cases, it might return null because those variables are undefined.\n> If the context object passed to the `getStateKey` function is not being merged correctly with the input object in the test cases, the function might not have access to all of the necessary information, and it might return null.\n\nWhile ChatGPT couldn’t solve the issue with the returned value directly, it provided a lot of possibilities to look into and offered suggestions of what variables or functions to debug. 
The suggestions prompted us to add code changes that logged the variable values onto the [JavaScript debug console](https://www.w3schools.com/js/js_debugging.asp), find the implementation of the `getStateKey` function, and figure out why the return value was null. Whenever we came across something unfamiliar in the code, like syntax in the `it.each` that wasn’t familiar, we asked ChatGPT for clarification or a helpful example. Many times throughout this experiment, working with ChatGPT felt like “rubber duck debugging,” but with an AI with which you have to be very specific about your ask.\n\n## What we learned from ChatGPT\n\nIn the end, we weren’t able to figure out why our tests were returning null, so we asked the front-end team if someone could review the code. Senior Frontend Engineer [Angelo Gulina](https://gitlab.com/agulina) reviewed the MR. He found that the solution was actually quite trivial: The order of parameters was inverted, resulting in a comparison that led to null! In his assessment, ChatGPT wasn’t able to provide a working solution, but would be able to provide solutions and ideas to an engineer with some experience with the codebase. It delivered a clean, organized solution and answered the task of combining the tests into an it.each table. It could not, however, catch the actual error (the inversion of parameters) or correctly guess why the tests were returning null.\n\nLet's circle back to the question that started this experiment: Can we use ChatGPT to contribute to GitLab? At this time, we’d say, \"yes,\" and you will need some understanding of the code to complete your solution. Since ChatGPT is a language model trained by OpenAI, it can only answer questions and provide information addressed in the model, which means answers requiring contextual specificity may fall short of what is needed to resolve an issue. 
However, it’s a tool that can help you if you’re stuck, need more clarification on a code snippet, or are trying to refactor some code. It was fascinating for us to experiment with ChatGPT and we were excited to see what it was capable of. The code provided, however, lacked some of the valuable insight and industry experience that a community of contributors can provide.\n\nAt GitLab, our [community and our open source stewardship](https://about.gitlab.com/company/strategy/#dual-flywheels) are part of our company strategy. Thousands of open source contributors worldwide have helped make GitLab what it is today. We see potential for ChatGPT and similar AI tools, not as a replacement for our community, but a way to make our community more efficient and enable more people to contribute GitLab.\n\n\n\n\n",[978,267,1515,9],{"slug":1692,"featured":6,"template":686},"can-chatgpt-resolve-gitlab-issues","content:en-us:blog:can-chatgpt-resolve-gitlab-issues.yml","Can Chatgpt Resolve Gitlab Issues","en-us/blog/can-chatgpt-resolve-gitlab-issues.yml","en-us/blog/can-chatgpt-resolve-gitlab-issues",{"_path":1698,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1699,"content":1705,"config":1711,"_id":1713,"_type":14,"title":1714,"_source":16,"_file":1715,"_stem":1716,"_extension":19},"/en-us/blog/career-spotlight-sre-vs-devops-engineer-vs-devops-platform-engineer",{"title":1700,"description":1701,"ogTitle":1700,"ogDescription":1701,"noIndex":6,"ogImage":1702,"ogUrl":1703,"ogSiteName":670,"ogType":671,"canonicalUrls":1703,"schema":1704},"DevOps careers: SRE, engineer, and platform engineer","Where does an SRE leave off and a DevOps engineer (or platform engineer) begin? 
Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666685/Blog/Hero%20Images/comparing-confusing-terms-in-github-bitbucket-and-gitlab-cover.jpg","https://about.gitlab.com/blog/career-spotlight-sre-vs-devops-engineer-vs-devops-platform-engineer","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps careers: SRE, engineer, and platform engineer\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lauren Gibbons Paul\"}],\n        \"datePublished\": \"2022-04-25\",\n      }",{"title":1700,"description":1701,"authors":1706,"heroImage":1702,"date":1708,"body":1709,"category":769,"tags":1710},[1707],"Lauren Gibbons Paul","2022-04-25","Even if you’re totally happy in your current position, it pays to keep an eye on your DevOps career path and learn about emerging roles, especially given [the way the DevOps space evolves so rapidly](https://www.simplilearn.com/is-a-devops-career-right-for-you-article). \n\nFor example, you might be wondering about the role of site reliability engineer (SRE) as opposed to DevOps engineer (and the totally new position called DevOps platform engineer, more on that later). These are all engineering positions requiring tech expertise and coding chops, but they play distinct roles on the DevOps team. Here’s what you need to know:\n\n## SRE: A seasoned role\n\nAs the title suggests, at a high level, SREs focus primarily on reliability, solving operational, scale, and uptime problems. In 2003, Google originated the SRE role to safeguard the uptime of its site, but it has evolved considerably since the advent of cloud native applications and platforms. Today, SREs concentrate on [minimizing the frequency and impact of failures](https://thenewstack.io/the-evolution-of-the-site-reliability-engineer-sre/) that can impact the overall reliability of a cloud\napplication. 
\n\nAccording to Glassdoor, SREs typically require a Bachelor’s or graduate engineering or computer science degree. Salaries range widely, according to Glassdoor, hitting about $120,000 after 2 to 4 years of experience but can reach up to [$300,000 and higher](https://www.glassdoor.com/Salaries/us-site-reliability-engineer-salary-SRCH_IL.0,2_IN1_KO3,28.html) at the senior level.\n\nAt least one blogger feels [the SRE title](https://rootly.com/blog/should-you-be-an-sre-or-a-devops-engineer) carries more prestige and earning potential than DevOps engineers.\n\nTypical SRE responsibilities include everything from designing, developing, installing, and maintaining software solutions to working with engineering teams to refine deployment and release processes. Collaboration and communication are important job skills for the SRE role, as they need to work closely with multiple roles across the organization. At the time of this blog's publication, there were 4,000 SRE jobs on Glassdoor. Indeed had more than 5,000 SRE postings and ZipRecruiter showed [nearly 12,000 posts](https://www.ziprecruiter.com/candidate/search?radius=5000&amp;search=site+reliability+engineer&amp;location=Remote) for remote SRE jobs.\n\nPython, Go, and Java were the [most sought-after SRE skills](https://www.indeed.com/jobs=site%20Reliability%20Engineer&amp;l&amp;vjk=829f6081218e60bd) listed on Indeed.\n\nAccording to Indeed, SREs transition to \"DevOps engineer\" at a high rate.\n\n## DevOps engineers bridge the gap\n\nDevOps engineers, on the other hand, concentrate on removing obstacles to production and automation and [making development and IT work well together](https://harness.io/blog/sre-vs-devops/).\n\nLike SREs, DevOps engineers need to be good at working and communicating with others, eliminating barriers to increase speed and quality of code delivery. 
With typically less need to be on call, the DevOps engineer\nmay have a more favorable work-life balance than an SRE, who can have around-the-clock call.\n\nDevOps engineer work responsibilities include such things as analysis of technology utilized within the company and then developing steps and processes to improve and expand upon them. Project management is another key function, establishing milestones for departmental contributions and establishing processes to facilitate collaboration.\n\nThe educational requirements for the two roles are comparable, with a Bachelor’s degree in computer science or engineering or higher as the usual price of admission.\n\nAccording to Glassdoor, the salary range for DevOps engineers is slightly lower than that of SREs, from a low of about $63,000 up to a high of $234,000 for someone with [2 to 4 years of experience](https://www.glassdoor.com/Salaries/us-devops-engineer-salary-SRCH_IL.0,2_IN1_KO3,18.htm). \n\nDevOps engineer positions are easier to find than SREs. Glassdoor has more than 6,000 DevOps engineer job posts. Indeed has more than 17,000. And ZipRecruiter has [more than 81,000](https://www.ziprecruiter.com/candidate/search?radius=5000&amp;search=devops+engineer&amp;location=Remote) remote DevOps engineer listings.\n\n## New to the game\n\n[Cloud native](/topics/cloud-native) development and the desire to have a unified DevOps platform have brought a new role, the DevOps platform engineer, a position that [works in parallel with the site reliability engineering function](/topics/devops/what-is-a-devops-platform-engineer/).\n\nPlatform engineering teams apply development principles to accelerate software delivery, ensuring app dev teams are productive in all aspects of the lifecycle. Platform engineers focus on the entire software development lifecycle from source to production. 
From this introspective process, they build a workflow that enables application\ndevelopers to [rapidly code and ship software](https://www.getambassador.io/resources/rise-of-cloud-native-engineering-organizations/).\n\nYou can find a helpful description of the roles of SRE vs. DevOps engineer vs. platform engineer [here](https://iximiuz.com/en/posts/devops-sre-and-platform-engineering/).\n\nBut it’s hard to find much career data for this emerging role. Glassdoor, Indeed, and ZipRecruiter do not yet separate out this role from the category of “DevOps engineer,” and consolidated salary and career path data is not available at this time. It is reasonable to conclude this new role will have higher pay based on rarer skill sets and job experience. Suffice to say, this is a hot area and bears watching.\n\n## Benefits of a DevOps career\n\nThe DevOps industry (and technology as a whole) is constantly evolving. And that creates a lot of opportunities. There are lots of job opportunities cropping up based on how technology changes, and this also means that you can have many chances to learn a new skill and score a role where there is an employee shortage. \n\nThere is a high demand for fresh new talent who are also eager to keep learning and adapting to an ever-changing environment. And in this evolving world of DevOps, the more change that happens means there are endless learning opportunities that will help build you up professionally. This makes you a competitive hire in the future, as well as becoming part of a technological landscape that will always be needed. \n\n## Skills required for a DevOps career\n\nWhether you have goals to become an SRE, a full-fledged DevOps engineer, or start slow and figure out where you want to work in the DevOps space, there are both soft and technical skills that definitely are or may require for you to be successful in whichever role you pursue.\n\nSome soft skills include:\n\n1. 
**The ability to be flexible.** Projects can stop and start and change at any time for lots of reasons. Things break and get buggy on the regular.  Being able to go with that flow and maintain good levels of productivity and professionalism will take you far. \n2. **Good communication skills.** DevOps projects are rarely simple and not only require the ability to communicate your thoughts but the patience to listen to others. \n3. **Ability to work collaboratively.** There are multiple people involved with any given DevOps project. Be prepared to have discussions about various projects and be part of the development process as a team, not as an individual.\n\nSome of the more technical skills that can help your job pursuits include (but are by no means limited to):\n\n1. **CI/CD.** Aspiring engineers should look for ways to add CI/CD concepts to existing personal projects and code. Creating your own personal projects involving CI/CD is a good way to test your deployment skills while also creating a good proof of skills reference for job interviews. \n2. **Coding skills.** Familiarity with multiple languages, such as Rust, Java, JavaScript, Ruby, Python, PHP, Bash, and many more is important for a DevOps engineer. You need to be able to write and fix issues in multiple programming languages. \n3. **Cloud computing.** Lots of application infrastructures revolved around cloud technologies, so having a basic knowledge of cloud computing will give you a competitive edge. \n4. **Automation knowledge.** A lot of working in DevOps is being able to automate time-consuming processes that need to happen all at once. Diving into some automation knowledge will help you more easily integrate with a new DevOps role. \n\n## The future of DevOps\n\nAccording to a newer Forrester report, future success in DevOps will need people and their organizations to be open to a mindset and technology shift. 
New tools will come around, common practices may shift, and DevOps teams need to be able to adapt to changes while continuing to work together to deliver top-quality work. \n\nA few trends to keep an eye on as time progresses are serverless computerless architecture, [the rise of DevSecOps](/topics/devsecops/), and low-code/no-code development to deploy applications swiftly with higher agility.\n",[9,813,1158],{"slug":1712,"featured":6,"template":686},"career-spotlight-sre-vs-devops-engineer-vs-devops-platform-engineer","content:en-us:blog:career-spotlight-sre-vs-devops-engineer-vs-devops-platform-engineer.yml","Career Spotlight Sre Vs Devops Engineer Vs Devops Platform Engineer","en-us/blog/career-spotlight-sre-vs-devops-engineer-vs-devops-platform-engineer.yml","en-us/blog/career-spotlight-sre-vs-devops-engineer-vs-devops-platform-engineer",{"_path":1718,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1719,"content":1725,"config":1732,"_id":1734,"_type":14,"title":1735,"_source":16,"_file":1736,"_stem":1737,"_extension":19},"/en-us/blog/cd-automated-integrated",{"title":1720,"description":1721,"ogTitle":1720,"ogDescription":1721,"noIndex":6,"ogImage":1722,"ogUrl":1723,"ogSiteName":670,"ogType":671,"canonicalUrls":1723,"schema":1724},"GitLab’s automated and integrated continuous delivery","Learn about how the power of GitLab Auto DevOps can help increase productivity and speed up releases.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681872/Blog/Hero%20Images/CD-2st-mkt-diff-cover-1275x849.jpg","https://about.gitlab.com/blog/cd-automated-integrated","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s automated and integrated continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-01-22\",\n      
}",{"title":1720,"description":1721,"authors":1726,"heroImage":1722,"date":1728,"body":1729,"category":1359,"tags":1730},[1727],"Cesar Saavedra","2021-01-22","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nOrganizations adopting DevOps best practices to software delivery spend time and effort designing, building, testing, integrating, and maintaining CI/CD pipelines for their different projects. Just as they must spend some of their time maintaining their business applications instead of innovating, they must do the same for their pipelines. Freeing your developers so that they can spend more of their time creating new business applications and differentiating value to the business is of utmost importance to remain competitive in a world where organizations must be digital leaders to succeed in the marketplace.\n\nGitLab provides [Auto DevOps](/topics/devops/), which are prescribed out-of-the-box CI/CD templates that auto-discover the source code you have. Based on best practices, they automatically detect, build, test, deploy, and monitor your applications. Auto DevOps save your developers from implementing their own pipelines so that they can spend more time innovating. In the following paragraphs, we go over how the power of Auto DevOps automates and integrates your continuous delivery to help increase productivity and speed up releases.\n\n## Enabling Auto DevOps\n\nIt’s very easy to enable Auto DevOps for your application. All you need to do is go to your Project Settings and select the configuration you desire for Auto DevOps. 
As the picture below depicts, you can select the deployment strategy to “Automatic deployment to staging, manual deployment to production”:\n\n![autodevops-on](https://about.gitlab.com/images/blogimages/cd-automated-integrated/autodevops-on.png){: .shadow.medium.center.wrap-text}\n\nThe Auto DevOps pipeline shifts work left to find and prevent defects as early as possible in the software delivery process.\n\nThe pipeline then deploys the application to staging for verification and then to production in an incremental fashion. Auto DevOps saves you and your developers from implementing your own pipelines so that you can spend more time innovating.\n\n## Auto DevOps stages and jobs\n\nThe stages and jobs of the Auto DevOps pipeline vary according to the way you configured it. You can also customize the prescribed Auto DevOps pipeline or reuse only portions of it. Let’s review the prescribed stages and jobs for a simple Java application.\n\n1) First you find the Build stage. Auto Build creates a build of the application using an existing Dockerfile or buildpacks. The resulting Docker image is pushed to the built-in Container Registry. \n\n![auto-build](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-build.png){: .shadow.medium.center.wrap-text}\n\nAll these steps are automatically executed on your application so that you can spend more time delivering value to the business.\n\n2) Next is a variety of tests under the Test stage. 
Auto DevOps includes jobs for static analysis and code checks, for identifying security issues in containers, for analyzing project dependencies and security issues, for scanning license dependencies, for detecting credentials and secrets exposure, for running security analysis of Java code, and for specific unit tests for the language and framework.
By releasing production changes gradually, error rates or performance degradation can be monitored, and if there are no problems, all of production can be updated.\n\n![auto-prod](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-prod.png){: .shadow.medium.center.wrap-text}\n\n6) The user has been prescribed a performance stage with a single job with the same name. Auto Browser Performance Testing measures the browser performance of each web page and reports on any degradation or improvement so that appropriate action can be taken.\n\n![auto-browser-perf](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-browser-perf.png){: .shadow.medium.center.wrap-text}\n\n7) The last stage is the cleanup stage, which contains a job that brings down and frees all resources of the ephemeral DAST environment that was brought up earlier in the CI portion of the pipeline.\n\n![auto-cleanup](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-cleanup.png){: .shadow.medium.center.wrap-text}\n\nThis entire prescribed CI/CD pipeline, with all its stages and jobs, is based on best practices and is automatically run for the user’s project saving them time and effort from developing their own pipeline.\n\n## Auto Review Apps\n\nAs developers collaborate on a project, Auto DevOps automatically includes Auto Review Apps, which stands up an ephemeral environment for stakeholders to review the running application with proposed changes before they are merged to the main branch. The teardown and freeing of the resources of the ephemeral review environment are also automatically done by Auto DevOps once the merge takes place.\n\n![auto-review-apps](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-review-apps.png){: .shadow.medium.center.wrap-text}\n\n## Modifying Auto DevOps\n\nHere are some ways that you can modify Auto DevOps.\n\n1) **Customization via environment variables**. 
If you would like to skip some of the stages and jobs in Auto DevOps, you can do this via project variables. For example, say you are using all open source licensed software within your project and you are pretty confident about your web application performance, and you’d also like to add the ability to do canary deployments. You can customize Auto DevOps via environment variables to skip the license-scanning and performance jobs and add canary deployments to your project by creating and setting specific environment variables as shown below.\n\n![auto-env-vars](https://about.gitlab.com/images/blogimages/cd-automated-integrated/auto-env-vars.png){: .shadow.medium.center.wrap-text}\n\nYou could also use the GitLab APIs to script these modifications if he so desired.\n\n2) **Customization by editing the DevOps pipeline**. Another way to customize the Auto DevOps pipeline is by adding it to your own project and then making changes to it.\nBelow you can see a screen snapshot of an Auto DevOps pipeline edit where LICENSE_MANAGEMENT and web PERFORMANCE tests are being disabled.\n\n![autodevops-pipeline-edit](https://about.gitlab.com/images/blogimages/cd-automated-integrated/autodevops-pipeline-edit.png){: .shadow.medium.center.wrap-text}\n\n3) **Customization by using only portions of Auto DevOps**. You could also leverage portions of Auto DevOps in your own pipeline by including specific templates. In the smaller pipeline below, only the Auto Build and Auto Test capabilities of Auto DevOps are being reused.\n\n![autodevops-portions](https://about.gitlab.com/images/blogimages/cd-automated-integrated/autodevops-portions.png){: .shadow.medium.center.wrap-text}\n\nThe power of Auto DevOps automates and integrates your continuous delivery to help speed up your releases by saving you time from having to write your own pipelines. 
By using Auto DevOps you can accelerate your product delivery times and bring differentiating application features faster to market.\n\nIf you’d like to see the power of GitLab Auto DevOps in action, watch this [video](https://youtu.be/blJT8f6ZDH8).\n\nFor more information, visit [LEARN@GITLAB](https://about.gitlab.com/learn/).\n\nPhoto by [Tim Carey](https://unsplash.com/@baudy?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/formula-1?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[977,9,1731],"demo",{"slug":1733,"featured":6,"template":686},"cd-automated-integrated","content:en-us:blog:cd-automated-integrated.yml","Cd Automated Integrated","en-us/blog/cd-automated-integrated.yml","en-us/blog/cd-automated-integrated",{"_path":1739,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1740,"content":1746,"config":1751,"_id":1753,"_type":14,"title":1754,"_source":16,"_file":1755,"_stem":1756,"_extension":19},"/en-us/blog/cd-solution-overview",{"title":1741,"description":1742,"ogTitle":1741,"ogDescription":1742,"noIndex":6,"ogImage":1743,"ogUrl":1744,"ogSiteName":670,"ogType":671,"canonicalUrls":1744,"schema":1745},"How to use GitLab tools for continuous delivery","Learn how to use GitLab technology to release software faster and with less risk.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682956/Blog/Hero%20Images/CD-continuous-nature-cover-880x586.jpg","https://about.gitlab.com/blog/cd-solution-overview","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab tools for continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2020-12-17\",\n      }",{"title":1741,"description":1742,"authors":1747,"heroImage":1743,"date":1748,"body":1749,"category":791,"tags":1750},[1727],"2020-12-17","\nThis blog 
post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-04-01.\n\nEach organization is unique in how they adopt continuous delivery (CD) principles, but the journey to modernize and enhance your software release process can be conducted in phases. In this blog post, we unpack some of the tools companies can use to adopt continuous delivery (CD), and explain how companies can reach continuous delivery in three key stages. The good news is, regardless of how you get there, GitLab offers a solution that allows companies to modernize their release process at their own pace and in their own way.\n\n## Consolidate disparate tools into a single platform\n\nThe first step to reaching [continuous delivery](/topics/continuous-delivery/) is to consolidate the number of disparate tools in your pipeline by using the tools and capabilities baked into the GitLab product. In this section, we summarize some of the fundamental components of GitLab and give examples of how they work.\n\nGitLab users can track issues and merge requests using [milestones](https://docs.gitlab.com/ee/user/project/milestones/#milestones), which also help with setting time-bound goals. Milestones can be used as Agile sprints and releases, and allow you to organize issues and merge requests into a one group, with an optional start date and an optional due date.\n\n![Example of GitLab milestone from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/milestone.png)\nScreenshot shows example milestone in GitLab.\n\n[Issues are a fundamental tool in GitLab](https://docs.gitlab.com/ee/user/project/issues/#issues), and include many components to help users communication information about product problems, new features, and more.\n\n[Merge requests (MRs) are created to merge one branch into another](https://docs.gitlab.com/ee/user/project/merge_requests/). 
MRs are also where solutions are developed and are a key input to the release planning process.
The deploy freeze window prevents unintended production releases during a particular time frame to help reduce uncertainty and risk of unscheduled outages.\n\n![Example of deploy freeze window from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/freeze.png)\nThis screenshow shows an example deploy freeze window in GitLab.\n\nRelated to the deploy freeze window, users can protect the production environment for a release to prevent unintentional releases. Deploy freeze windows protect the production environment by specifying who is allowed to deploy to the environment. Assigning specific roles and responsibilities streamlines the approval gates and release process.\n\n![protected-env](https://about.gitlab.com/images/blogimages/cd-solution-overview/protected-env.png)\n\nWhen it's ready, the [user can create the release which automatically generates the release evidence](https://docs.gitlab.com/ee/api/releases/#collect-release-evidence). This streamlined process helps reduce release cycle times.\n\n![Example of release evidence from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/release-evidence.png)\nHere is an example of release evidence from a demo project in GitLab.\n\n## Implement continuous delivery\n\nThe capabilities described above help to establish some best practices for software continuous delivery. In this next phase of the CD cycle, every change is automatically deployed to the User Acceptance Testing env/Staging (with a manual deployment to production). In this scenario, there is no need for a deploy freeze, and the release manager can cut a release from staging at any point in time.\n\n[GitLab Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) helps users automatically create the release pipeline and relieves them from manually creating a pipeline. 
With Auto DevOps, users can automatically deploy to the staging environment and manually deploy to production and enable canary deployments. Auto DevOps, which is based on DevOps best practices, helps you streamline the release process.\n\n![Example of enabling Auto DevOps from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/enable-auto-devops.png)\nHow to enable Auto DevOps in GitLab.\n\nThe first job in Auto DevOps is the build job, as shown below:\n\n![build-job](https://about.gitlab.com/images/blogimages/cd-solution-overview/build-job.png)\nThe build job in GitLab Auto DevOps.\n\nThe build job applies the appropriate build strategy to create a Docker image of the application and stores it in the built-in Docker Registry.\n\n![Example of container registry from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/container-registry.png)\nSee the example of a container registry in GitLab.\n\nFaster and more reliable releases happen when you have build components like Docker images that are consistent, uniform, and readily available throughout the release process. GitLab also includes a built-in [Package Registry](https://docs.gitlab.com/ee/user/packages/) that supports many packaging technologies.\n\n![Example of package registry from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/package-registry.png)\nHere's what the package registry looks like in GitLab.\n\n[Review Apps](https://docs.gitlab.com/ee/ci/review_apps/#review-apps) allow the user to visualize what features will go into production. As updates are made to the application via MRs, the MRs kick off Review Apps, which streamlines the review process, including the automatic creation and destruction of an ephemeral review environment. Using Review Apps, stakeholders can verify the updates to the application before the changes are merged to the main line. 
Review Apps help increase code quality, reducing the risk of unexpected production outages.
As shown below:\n\n![Example application error from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/app-error.png)\nThis is what an application error will look like in GitLab.\n\nIf you encounter an app error, you could decide to perform a rollback by drilling down into the production environment page and identifying the release that had been running before the last deployment. This page is an auditable sequence of changes that have been applied to the production environment. The rollback process starts with the click of a button. Rollbacks speed up recovery of production in case of failures and lowers outage times, which improves the user experience.\n\n![Example rollback from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/rollback.png)\nRollback in GitLab to speed up production recovery.\n\nPipelines usually run automatically, but to schedule a pipeline once a day at midnight, for example, so staging can have the most recent version of the application each day, go to CI/CD->Schedules. Scheduling pipelines can improve the efficiency of the development life cycle and release processes.\n\n![Example of pipeline scheduling from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/pipeline-sched.png)\nHow to schedule a pipeline to run in the future.\n\nWhile the application is running in production, track how the release is performing and quickly identify and troubleshoot any production issues. There are a few ways to do this. One way is to access the \"Monitoring\" feature for a specific environment to track system and application metrics, such as system and pod memory usage, and the number of cores used. 
The monitoring tracking includes markers (small rocket icon) when updates were introduced to the environment, so that fluctuations in the metrics can be correlated to a specific update.\n\n![Example monitoring capabilities from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/monitoring.png)\nExplore monitoring capabilities in GitLab.\n\nMonitoring reduces the time to identify, resolve and preempt production problems, which lowers the risk of unscheduled outages. It also provides an opportunity for monitoring business activity and optimizes cloud costs. This type of monitoring is not only useful to release managers but also to DevOps engineers, application operators, and platform engineers.\n\nAnother way to monitor the release is by creating alerts to detect out-of-range metrics, which are visible on the overall operations metrics dashboard as well as on each specific environment window. Alerts can also automatically trigger ChatOps and email messages to appropriate individuals or groups.\n\n![Example alerts from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/alerts.png\nExample alerts in GitLab.\n\nYou can manage alerts from the [Operations Alerts window](https://docs.gitlab.com/ee/operations/incident_management/alerts.html), a single location from which you can assess and handle alerts, which may include the manual or automatic rollback of a release.\n\n![Example alerts dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/alerts-window.png)\nWhat the he alerts dashboard looks like on GitLab.\n\nUsers can track and monitor the release progress through [Value Stream Analytics](https://docs.gitlab.com/ee/development/value_stream_analytics.html#value-stream-analytics-development-guide), where you can check your project or group statistics over time and see how your team improves in the number of new issues, commits, deploys, and deployment frequency. 
Value Stream Analytics is useful to quickly determine the velocity of a given project. It points to bottlenecks in the development process, allowing management to uncover, triage, and identify the root cause of slowdowns in the software development life cycle.\n\n![Example value stream analytics from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/value-stream.png)\nValue stream analytics in GitLab.\n\nLastly, another way to track and monitor the release is through [Pipeline analytics](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html#pipeline-success-and-duration-charts). Pipeline analytics shows the history of your pipeline successes and failures, as well as how long each pipeline runs. This helps explain the health of your projects and their continuous delivery.\n\n![Example pipeline analytics from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/pipeline-analytics.png)\nScreenshot shows example pipeline analytics in GitLab.\n\nThe [Operations dashboard](https://docs.gitlab.com/ee/user/operations_dashboard/#operations-dashboard) can contain more than one project, and allows users to oversee more than one release. 
This dashboard provides a summary of each project's operational health, including pipeline and alert status.\n\n![Example operations dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/ops-dashboard.png)\nExample of operations dashboard in GitLab.\n\nRelease managers can also access the [environments dashboard](https://docs.gitlab.com/ee/ci/environments/environments_dashboard.html#environments-dashboard) to provide a cross-project, environment-based view that lets you see the big picture of what is happening in each environment.\n\n![Example environments dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/env-dashboard.png)\nThe environments dashboard in GitLab.\n\nAnother option is to drill down into a specific environment to see all the updates applied to the environment.\n\n![Example production environment dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/prod-env-dashboard.png)\nThe production environment dashboards shows all updates applied to the environment.\n\nAll these dashboards offer operations insights that are necessary to understand how a release is performing in production and quickly identify and troubleshoot any production issues.\n\n## Implement continuous deployment\n\nThe third phase in the journey is continuous deployment, where users can send updates directly to production. Instead of manually triggering deplyments, continuous deployment sends changes to production production auomatically (no human intervention is required). Teams can only achieve continuous deployment once continuous delivery is already in place.\n\nTo introduce a feature to a segment of end-users in a controlled manner in production, create [feature flags](/blog/feature-flags-continuous-delivery/). 
Feature flags help reduce risk and let the user conduct controlled tests and separate feature delivery from customer launch.\n\n![Example feature flag from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/feature-flag.png)\nFeatures flags in GitLab.\n\nA project's audit events dashboard will record what user introduced a feature flag.\n\n![Example audit events dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/events-dashboard.png)\nScreenshot shows example audit events dashboard in GitLab.\n\nCheck security and compliance-related items of the project by visiting the [Security dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/#gitlab-security-dashboards-and-security-center).\n\n![Example security dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/sec-dashboard.png)\nThe security dashboard in GitLab.\n\nThese dashboards help you preempt out-of-compliance scenarios to avoid penalties. They also streamline audits, provide an opportunity to optimize cost, and lower risk of unscheduled production outages.\n\nWe have reviewed how GitLab can help you make your releases safe, low risk, worry-free, consistent, and repeatable.\n\nWhether you are just starting your journey into DevOps, or already in the midst of implementing DevOps processes, [GitLab's continuous delivery](/stages-devops-lifecycle/continuous-delivery/) can help you every step of the way with capabilities built on DevOps and CD best practices.\n\n## Watch and learn\n\nMore of a video person? 
Tune in below to see GitLab’s continuous delivery solution in action.\n\n\u003Chttps://www.youtube-nocookie.com/embed/L0OFbZXs99U>\n\nFor more information, visit [LEARN@GITLAB](/learn/).\n",[977,9,1731],{"slug":1752,"featured":6,"template":686},"cd-solution-overview","content:en-us:blog:cd-solution-overview.yml","Cd Solution Overview","en-us/blog/cd-solution-overview.yml","en-us/blog/cd-solution-overview",{"_path":1758,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1759,"content":1765,"config":1770,"_id":1772,"_type":14,"title":1773,"_source":16,"_file":1774,"_stem":1775,"_extension":19},"/en-us/blog/cd-unified-monitor-deploy",{"title":1760,"description":1761,"ogTitle":1760,"ogDescription":1761,"noIndex":6,"ogImage":1762,"ogUrl":1763,"ogSiteName":670,"ogType":671,"canonicalUrls":1763,"schema":1764},"GitLab's unified and integrated monitoring strategies","Learn about GitLab’s unified and integrated monitoring capabilities and advanced deployment strategies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681771/Blog/Hero%20Images/CD-1st-mkt-diff-cover-1275x849.jpg","https://about.gitlab.com/blog/cd-unified-monitor-deploy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's unified and integrated monitoring strategies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2020-11-23\"\n      }",{"title":1760,"description":1761,"authors":1766,"heroImage":1762,"date":1767,"body":1768,"category":1359,"tags":1769},[1727],"2020-11-23","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nA well integrated and consistent approach to monitoring what is running in production and how it is running can provide not only useful information about the infrastructure and applications but also a feedback loop about how your end users are utilizing your business applications. 
The ability to visualize what goes into production, what to deploy to production, and who to deploy it to can provide organizations the data to help them select and prioritize capabilities that matter to their customers. In addition, the ability to monitor performance and tracing of deployments allows them to preempt production problems, quickly troubleshoot issues and rollback a release, if needed.\n\nGitLab provides the ability to monitor the performance of a deployment and easily rollback if needed. It also empowers you to choose what to deploy and who to deploy to in production via Feature Flags as well as advanced deployment strategies, like Canary deployments, in a consistent, repeatable, and uniform manner to help make your releases safe, low risk, and worry-free.\n\n\nLet’s first delve into how GitLab provides the capabilities to quickly release, identify production problems and quickly roll back.\n\nFor a release manager, the Environment Dashboard provides a cross-project environment-based view with the big picture of what is going on in each environment:\n\n![environment dashboard](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/Env-dashboard.png){: .shadow.medium.center.wrap-text}\n\nThe Environment Dashboard also gives easy access to the CD pipeline. In the picture above, clicking on the “blocked” link takes you to the CD pipeline view:\n\n![CD pipeline](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/CD-pipeline.png){: .shadow.medium.center.wrap-text}\n\nFrom the CD pipeline, a release manager can perform a canary deployment and also roll out to production incrementally, for example. 
The performance job above runs web browser performance tests and determines any degradation or improvement in the measurements and reports them as shown below:\n\n![webperf errors](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/Review-webperf-errors.png){: .shadow.medium.center.wrap-text}\n\nA release manager can take this information into consideration to determine whether or not these errors warrant a rollback of the release from production.\n\n![rollback button](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/Rollback-click.png){: .shadow.medium.center.wrap-text}\n\nFrom the production environment window, depicted above, clicking on the rollback environment button, will reset the production to its previous working state.\n\nIT teams often run into issues when building and releasing software and without direct user feedback, they often build out too many features, many of which go unused. Without the ability to test in production, IT organizations spend more time on testing, prolonging release cycles, but quality is only marginally improved. Modern IT teams can overcome these issues by using experimentation systems capabilities, such as feature flags and canary deployments.\n\n![feature flags screen](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/ff-screen.png){: .shadow.medium.center.wrap-text}\n\nGitLab supports Feature Flags as shown above. In the example, the defined feature flag named “prods-in-alpha-order-ff” has three strategies:\n\n- For the production environment: provide the feature to 50% of users based on the availability of their IDs\n\n- For the staging environment: provide the feature to the users listed in the user list “prods-in-alpha-order-user-list”\n\n- For the review environment: provide the feature to only one user.\n\nFeature Flags can also be combined with canary deployments. 
For example, in the picture below, the release manager has chosen to release the canary to half of the nodes in production:\n\n![50 percent rollout](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/CD-pipeline-50-percent.png){: .shadow.medium.center.wrap-text}\n\nAnd this combined deployment can be visualized via the deploy board as follows:\n\n![deploy board](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/deploy-board.png){: .shadow.medium.center.wrap-text}\n\nAbove, production has four nodes, two of which are running the new canary deployment, and the other two are still running the current production deployment.\n \nThe combination of canary deployments and feature flags can help gather direct users’ feedback to determine what features are relevant to them, so that an IT organization can focus on these, to shorten release cycle times and deliver higher quality and differentiating value to their users.\n\nLastly, integrated monitoring plays an important role in the feedback loop for these advanced deployment strategies and experimentation systems. With GitLab’s unified and integrated monitoring, you can track system and application metrics cluster-wide as well as per pod.\n\n![clusterwide monitoring](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/clusterwide-monitoring.png){: .shadow.medium.center.wrap-text}\n\nIn the picture above, you can see the dashboards that monitor clusterwide metrics. And the picture below shows the dashboards that monitor pod-specific metrics:\n\n![podspecific monitoring](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/podspecific-monitoring.png){: .shadow.medium.center.wrap-text}\n\nGitLab provides the ability to monitor the performance of a deployment and easily rollback if needed. 
It also empowers you to choose what to deploy and who to deploy to in production via Feature Flags as well as advanced deployment strategies, like Canary deployments, in a consistent, repeatable, and uniform manner to help make your releases safe, low risk, and worry-free.\n\nIf you’d like to see some of GitLab’s unified and integrated monitoring capabilities and advanced deployment strategies in action, watch this [video](https://youtu.be/ihdxpO5rgSc).\n\nFor more information, visit [LEARN@GITLAB](https://about.gitlab.com/learn/).\n\n\n",[109,9,1731],{"slug":1771,"featured":6,"template":686},"cd-unified-monitor-deploy","content:en-us:blog:cd-unified-monitor-deploy.yml","Cd Unified Monitor Deploy","en-us/blog/cd-unified-monitor-deploy.yml","en-us/blog/cd-unified-monitor-deploy",{"_path":1777,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1778,"content":1784,"config":1790,"_id":1792,"_type":14,"title":1793,"_source":16,"_file":1794,"_stem":1795,"_extension":19},"/en-us/blog/celebrating-17-years-of-git",{"title":1779,"description":1780,"ogTitle":1779,"ogDescription":1780,"noIndex":6,"ogImage":1781,"ogUrl":1782,"ogSiteName":670,"ogType":671,"canonicalUrls":1782,"schema":1783},"Celebrating 17 years of Git","Here's the history, tips, tricks and even a mea culpa to help celebrate the 17th anniversary of Git.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679424/Blog/Hero%20Images/gitbirthday.jpg","https://about.gitlab.com/blog/celebrating-17-years-of-git","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Celebrating 17 years of Git\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-04-07\",\n      }",{"title":1779,"description":1780,"authors":1785,"heroImage":1781,"date":1786,"body":1787,"category":679,"tags":1788},[851],"2022-04-07","\n\nSeventeen years ago, the Linux community embraced Git as its 
universal open source version control solution. Created by Linus Torvalds, Git replaced BitKeeper, a proprietary but free-of-charge option that worked, to a point, until it didn’t (and ultimately started costing a fee).\n\nIn the years since, there’s been little to no agreement on what the term “Git” actually means but there’s no disputing its rockstar status in the DevOps world. Tens of millions of developers rely on Git’s fast and seamless branching capabilities every single day. In fact, 85% of DevOps professionals who took our [2021 Global DevSecOps Survey](/developer-survey/) said they use Git for source control.\n\nSo, to honor this anniversary, we share our favorite Git tips and tricks and look back at the origins of its name, its 15th anniversary celebration, and even a declaration from one of our own who was certain Git would _never be in his toolkit_. No, really.\n\n## The origin of the name Git\n\nThere’s not much quirky or charming about the world of DevOps, but the theories around the origin of the name Git may be an exception. Torvalds claimed to have named Linux after himself, and he said Git (British slang for “jerk”) was no different. “I’m an egotistical b*stard, and I name all my projects after myself,” he [said at the time](https://git-scm.com/book/en/v2/Getting-Started-A-Short-History-of-Git). \n\nThe source code’s README takes the story in a different direction: Git is easy to pronounce, not used by UNIX, and could sound like “get.” It could be [British shade-throwing](http://www.peevish.co.uk/slang/english-slang/g.htm?qa=150&ss360SearchTerm=git#git), or it could stand for “global information tracker” (the choice of those happily working with a functioning tool). And for those frustrated with Git, there’s also “goddamn idiotic truckload of sh*t.”\n\n## Tips and tricks for better Git\n\nIs it possible to improve on a tool that so many use every single day? 
Actually, it is, starting with 15 ways [to get a better Git workflow](/blog/15-git-tips-improve-workflow/). Learn how to:\n\n- autocomplete commands\n- use Git blame more efficiently\n- reset files\n- understand the plugins\n\nAlso, Git can help [keep merge requests tidy and humming along](/blog/start-using-git/).\n\nFor an exhaustive look at how GitLab uses Git internally, including .gitconfig on steroids, the lowdown on aliases, and command line tips, we’ve [gathered a life-changing list](/blog/git-tips-and-tricks/). Also, here’s our take on [why (and how) to keep your Git history clean](/blog/keeping-git-commit-history-clean/) and how to do it using [interactive rebase](/blog/keep-git-history-clean-with-interactive-rebase/).\n\n## Remembering the 15th anniversary celebrations\n\nLandmark anniversaries always make people reflect, and Git’s 15th in 2020 was no exception. Not only was there [an actual party – Git Merge 2020](/blog/git-merge-fifteen-year-git-party/), our staff developer evangelist Brendan O’Leary admitted the unthinkable: Back in the day, he was [never ever going to use Git](https://www.computerweekly.com/blog/Open-Source-Insider/GitLab-guru-15-years-later-were-still-learning). Brendan, who obviously has learned his lesson, also teamed up with GitHub’s distinguished software engineer Jeff King to talk about [Git’s impact on software development](https://www.infoq.com/news/2020/04/git-fifteen-anniversary-qa/).\n\n## Practical Git\n\nAlthough there’s a lot to learn about Git, Brendan and other developers consistently stress the simplicity is what sets it apart. 
So here are three of our most bookmarked pages of straightforward Git advice:\n\n[6 common Git mistakes and how to fix them](/blog/git-happens/)\n[Understand the new Git branch default name](/blog/new-git-default-branch-name/) \n[A guide to Git for beginners](/blog/beginner-git-guide/)\n\nSo make sure to raise a glass to 17 years of Git and its many benefits.\n",[1789,9,749],"git",{"slug":1791,"featured":6,"template":686},"celebrating-17-years-of-git","content:en-us:blog:celebrating-17-years-of-git.yml","Celebrating 17 Years Of Git","en-us/blog/celebrating-17-years-of-git.yml","en-us/blog/celebrating-17-years-of-git",{"_path":1797,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1798,"content":1804,"config":1809,"_id":1811,"_type":14,"title":1812,"_source":16,"_file":1813,"_stem":1814,"_extension":19},"/en-us/blog/challenges-of-code-reviews",{"title":1799,"description":1800,"ogTitle":1799,"ogDescription":1800,"noIndex":6,"ogImage":1801,"ogUrl":1802,"ogSiteName":670,"ogType":671,"canonicalUrls":1802,"schema":1803},"The challenges of code reviews","The 2020 DevSecOps Report discovers that developers are bogged down by code reviews. Are they worth the trouble?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663975/Blog/Hero%20Images/devsecopssurvey.png","https://about.gitlab.com/blog/challenges-of-code-reviews","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The challenges of code reviews\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2020-07-03\",\n      }",{"title":1799,"description":1800,"authors":1805,"heroImage":1801,"date":1806,"body":1807,"category":679,"tags":1808},[702],"2020-07-03","\n\n## Code review and quality challenges\n\nCode reviews are stressful. As a merge request owner, you're giving others an inside look at your abilities and thought processes. 
As a reviewer, there’s something quite daunting about serving as the last stop before code is merged to the main branch. When teams face uncertain processes, lengthy wait times, and lack of buy-in, an inherently difficult task can soon feel Sisyphean. In GitLab’s [2020 Global DevSecOps Survey](/developer-survey/previous/2020/), over 3600 software professionals shared their thoughts on code reviews, and the results reinforce that code reviews are a challenging aspect of software development.\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) has the latest insights from over 5,000 DevOps professionals. You can also compare it with [previous year surveys](/developer-survey/previous/)_\n\n## Why is code review important?\n\nCode reviews enable developers to more easily identify bugs, because they’re assessing the code with a fresh perspective. Shipping clean code decreases the likelihood of errors nestling into the main branch. Teams turn to code reviews as a way to share knowledge, mentor newer developers, and ease the burden of development. When everyone reviews code, there is no longer a single point of failure that can halt delivery and risk missing releases or business goals.\n\nStudies show that [code reviews increase collaboration](https://www.microsoft.com/en-us/research/publication/expectations-outcomes-and-challenges-of-modern-code-review/), because the process of working together to improve code quality creates a shared ownership of the codebase. Developers work towards a common goal rather than feel proprietary attachment to their lines.\n\n## Code reviews according to developers\n\nIn the 2020 DevSecOps Report, developers candidly shared their views on code reviews, with many highlighting the challenges of ensuring code quality standards. 
Here’s a look at what developers said about code reviews.\n\n| **How frequently does your team conduct code reviews?** |\n| Weekly |  48.9% |\n| Bi-weekly |  13.6% |\n| Monthly |  9.5% |\n\nMost respondents do code reviews weekly, indicating that teams are committed to making them part of their workflow.\n\n| **How do code reviews happen?** |\n| Messaging chat |  40.8% |\n| Offline |  28.6% |\n| Other |  20.9% |\n| Video |  9.7% |\n\nDocumentation is an important part of a successful code review. Authors should be able to refer to a checklist or assessment that highlights areas of improvement or excellence. Respondents indicated that the majority of code reviews occur in messaging chat or offline, which may enable written documentation.\n\n| **How do you prefer to do code reviews?** |\n| IDE | 44% |\n| Browser |  41.4% |\n| Code/text editor |  14.6% |\n\nConducting code reviews in integrated development environments and browsers is unsurprising, because there’s a low barrier to entry in participating and collaborating with others.\n\nMany respondents shared their thoughts about the specific challenges they face when doing code reviews.\n\n_“Code reviews can take a long time due to the lack of reviewers.”_\n\nWithout enough reviewers, code reviews can become overwhelming for the few people who make time for this task. Code reviews become a burdensome activity that can prevent certain team members from meeting goals and delivering.\n\n_“As an all-remote company, we haven't yet solved the problem of needing reviews from people in much different time zones and working hours. We have a strict code review process, and it often takes several days for the reviewer to respond to requests for review. Planning takes a while, and our code review process, while awesome, takes some time.”_\n\nWorking in a distributed team can have drawbacks, especially when waiting for domain experts or maintainers to review code. 
While processes are important in establishing workflow, it’s equally important not to slow down team members with process.\n\n_“Our code review process is disorganized. It took more time when reviewing the code and testing than expected.”_\n\nOn the other hand, not having an established review process can lead to stress, confusion, and pressure. Team members may dread code reviews due to the disorganization, which can lead to insufficient assessment.\n\n_“Some experts do not understand the importance of code review and regard it as a secondary task.”_\n\nWhen some team members do not value code review, they may deprioritize it and be reluctant participants. Collaboration is a key component in ensuring successful code reviews, and lack of buy-in can slowly erode morale.\n\n## Are code reviews worth it?\n\nBased on the frustration level found in the 2020 DevSecOps Survey, it’s hard not to wonder whether code reviews are worth the trouble. But all complaints aside, it’s clear that the review process is helpful.\n\n| **How valuable are code reviews?** |\n| Very valuable | 61.9% |\n| Moderately valuable |  33.3% |\n| They have no effect |  3.7% |\n\nCode reviews are worth the difficulties, because they help teams collaborate to maintain a clean codebase, learn from each other to develop new skills, and ensure that innovative solutions solve complex problems. 
In order for team members to feel like code reviews are valuable, IT leaders must invest time in establishing processes to ensure that everyone has the tools and knowledge to succeed.\n\n## Ready to learn more about code reviews?\n\nHere are a few resources to help you alleviate the challenges of code review.\n\n**[Read GitLab’s mandatory code review process →](https://docs.gitlab.com/ee/development/code_review.html)**\n\n**[Learn how to troubleshoot delays with GitLab’s Code Review Analytics tool →](/blog/troubleshoot-delays-with-code-review-analytics/)**\n\n**[Discover better code reviews GitLab style →](/blog/better-code-reviews/)**\n\n**[What blocks faster code releases? It starts with testing\n →](/blog/what-blocks-faster-code-release/)**\n\n**[Read about GitLab’s experience with Reviewer Roulette →](/blog/reviewer-roulette-one-year-on/)**\n",[771,681,9],{"slug":1810,"featured":6,"template":686},"challenges-of-code-reviews","content:en-us:blog:challenges-of-code-reviews.yml","Challenges Of Code Reviews","en-us/blog/challenges-of-code-reviews.yml","en-us/blog/challenges-of-code-reviews",{"_path":1816,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1817,"content":1823,"config":1831,"_id":1833,"_type":14,"title":1834,"_source":16,"_file":1835,"_stem":1836,"_extension":19},"/en-us/blog/chris-hill-devops-enterprise-summit-talk",{"title":1818,"description":1819,"ogTitle":1818,"ogDescription":1819,"noIndex":6,"ogImage":1820,"ogUrl":1821,"ogSiteName":670,"ogType":671,"canonicalUrls":1821,"schema":1822},"How Jaguar Land Rover embraced CI to speed up their software lifecycle","Inspiration, persistence, an attitude of continuous improvement – how adopting CI helped this vehicle company implement software over the air.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667619/Blog/Hero%20Images/chris-hill-jlr-does.jpg","https://about.gitlab.com/blog/chris-hill-devops-enterprise-summit-talk","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Jaguar Land Rover embraced CI to speed up their software lifecycle\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-07-23\",\n      }",{"title":1818,"description":1819,"authors":1824,"heroImage":1820,"date":1825,"body":1826,"category":1827,"tags":1828},[1378],"2018-07-23","\n\n[CI/CD](/topics/ci-cd/) gets us pretty excited anyway, but it's not often we get to talk about how it improves something as cool as a luxury car. Chris Hill, Head of Systems Engineering for Infotainment at Jaguar Land Rover, recently shared his own team's journey from feedback loops of 4-6 weeks to just 30 minutes, in this inspiring talk from [DevOps Enterprise](/stages-devops-lifecycle/) Summit London 2018.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/CEvjB-79tOs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Key takeaways from Chris' talk\n\n### What's needed for transformation\n\n\u003Cdiv class=\"panel panel-default twitter-block\"> \u003Ca class=\"twitter-block-link panel-body\" href=\"http://twitter.com/share?text=%22Driving change within an enterprise requires three qualities: inspiration, persistence, and an attitude of continuous improvement.%22 – @chillosuvia via @gitlab&amp;amp;url=https://about.gitlab.com/blog/chris-hill-devops-enterprise-summit-talk/&amp;amp;hashtags=\" rel=\"nofollow\" target=\"_blank\" title=\"Tweet!\"> \u003Cspan class=\"twitter-text pull-left\"> \"Driving change within an enterprise requires three qualities: inspiration, persistence, and an attitude of continuous improvement.\" – @chillosu via @gitlab \u003C/span> \u003Cspan class=\"click-to-tweet\"> Click to tweet! 
\u003Ci class=\"fab fa-twitter\">\u003C/i> \u003C/span> \u003C/a> \u003C/div>\n\n### How you respond to complaints matters\n\n> \"Equally if not more important than the complaint itself, is the response or reaction to the complaint. 'Can I bring a complaint, that I know my voice is heard and that somebody cares about resolving my issue?'\"\n\n> \"'I asked the ops team three weeks ago to add a build dependency on the build servers, and it still hasn't been added. I'm just going to go back to building on my own.' This complaint obviously is a knife right to the heart because you feel like you've started to regress. But what I like about this complaint is it led to a behavioral change as well as a technical change. We decided instead of continuing the same direction, to move to ephemeral Docker containers to run all of our builds. With ephemeral Docker containers we defined every piece of build infrastructure as code. We used packer recipes to find a Docker container, and every app developer could now change the underlying infrastructure which built their application. They were empowered. They now had the self service to do their lifecycle on their own. And you're never going to receive the ops complaint because you've handed over the keys.\"\n\n### Efficient feedback loops are critical\n\n> \"Our feedback loops were 4-6 weeks. Could you imagine writing code today and six weeks from now being told whether or not it works or is broken? I don't remember the shirt that I wore yesterday, let alone what I had for breakfast this morning, let alone what I wrote six weeks ago, and chances are I've been working on features for the last six weeks, and for me to try to unpick what I was thinking at that point could be a huge context-switch penalty.\"\n\n> \"Infotainment also had a significantly higher number of contributors – up to 1,000 contributors. And what we noticed is that contributions don't come linearly, they come in bursts. 
We actually found that Thursdays were the day that most of our developers committed on. And when we had manual code reviews, if we didn't have reviewers ready on a Thursday, we would create our own backlog.\"\n\n### Deployments don't have to be limited to a traditional release cycle\n\n> \"How could we change the game? Instead of ditching the combustion engine, we ditched the dealership visits, and we implemented software over the air. And this huge Linux distribution that we build upwards towards 700 times per day in a continuous integration pattern, on a dev branch or a master branch, or a release branch, we can now deliver to every vehicle in the form of small, incremental deltas. We can also deliver it to the vehicle while you're driving, and not interrupt your daily life. In fact I showed Gene yesterday, we started a download and an install while I was driving, and the entire thing happened in the background. Jeff even made the comment, 'This is blue-green deployment for vehicles.'\"\n\n> \"One of my favorite indicators is deploys per day, per developer. But I was always embarrassed to share ours because it was always below one. All of our new software wouldn't actually make it to vehicles; it was always batched together. Now I'm happy to say we can deploy, and we have been in our engineering environment, 50-70 times per day of each individual piece of software to a target or to a vehicle.\"\n\n> \"No longer are deployments limited to a traditional software release cycle. We've now skirted every single process to get a technician a new piece of software, and bother somebody else's day – one of our owners – to come into a dealership and spend an hour waiting for their vehicle to be done. 
We've now empowered the customer to be their own technician.\"\n","customer-stories",[9,749,976,1829,1830,793],"user stories","automotive",{"slug":1832,"featured":6,"template":686},"chris-hill-devops-enterprise-summit-talk","content:en-us:blog:chris-hill-devops-enterprise-summit-talk.yml","Chris Hill Devops Enterprise Summit Talk","en-us/blog/chris-hill-devops-enterprise-summit-talk.yml","en-us/blog/chris-hill-devops-enterprise-summit-talk",{"_path":1838,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1839,"content":1845,"config":1850,"_id":1852,"_type":14,"title":1853,"_source":16,"_file":1854,"_stem":1855,"_extension":19},"/en-us/blog/ci-cd-changing-roles",{"title":1840,"description":1841,"ogTitle":1840,"ogDescription":1841,"noIndex":6,"ogImage":1842,"ogUrl":1843,"ogSiteName":670,"ogType":671,"canonicalUrls":1843,"schema":1844},"A surprising benefit of CI/CD: Changing development roles","DevOps and CI/CD make for faster code release, but they're also causing sweeping changes in dev and ops roles and responsibilities.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668027/Blog/Hero%20Images/cicd.jpg","https://about.gitlab.com/blog/ci-cd-changing-roles","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A surprising benefit of CI/CD: Changing development roles\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-07-16\",\n      }",{"title":1840,"description":1841,"authors":1846,"heroImage":1842,"date":1847,"body":1848,"category":679,"tags":1849},[851],"2020-07-16","\n\nWhen it comes to [CI/CD](/topics/ci-cd/) and [DevOps](/topics/devops/), the benefits are obvious: Get it right and cleaner code is released (a lot) faster.\n\nBut our [2020 Global DevSecOps Survey](/developer-survey/previous/2020/) found more subtle – and far less talked about – benefits. 
[CI/CD](https://docs.gitlab.com/ee/ci/) doesn't just allow developers to move faster and do more, it also allows them (and their operations counterparts) **to do less**. The automation required by CI/CD has drastically reduced the manual tasks involved in software development. With fewer time-consuming tasks, Dev and Ops roles and responsibilities are changing, in some cases dramatically.\n\nBut don't just take our word for it. We asked our 2020 survey takers to tell us in their own words how their roles and responsibilities are changing.\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\n## The back story\n\nTo understand the impact of CI/CD and DevOps, it helps to have the full picture. In our 2020 survey 83% of developers said they're releasing code faster than ever before. In fact, nearly 60% of them deploy multiple times a day, once a day, or once every few days (that's 15 percentage points higher than in 2019). Just in the last year about 21% of developers said their teams added CI to their process, while just over 15% brought in continuous deployment.\n\nThe benefits of these processes are clear, the developers told us:\n\n\"We've set up automated processes to build, test, and deploy code using a mixture of our own tools and open source tools.\"\n\n\"(We now have) automated tests, automated deployment on code review approval.\"\n\n\"A templatized CI/CD process has significantly sped up build and deploy times to multiple environments in multiple clouds.\"\n\n\"Automated testing using GitLab CI has meant less overhead when reviewing code and quicker and safer deploys.\"\n\n\"Automated testing and continuous integration have made our deployments safer and more optimized. 
Now everyone in the team has the permission to deploy the code.\"\n\n\"CI and CD tremendously reduced time for build and deploy applications and eliminated problems with the build environment.\"\n\n\"Automation has made one-click testing and deployment possible for us.\"\n\n\"Deployment has become a non-task. Bootstrapping new projects is 10x faster because of the reusable infrastructure.\"\n\n\"We reduced our CI build queue time by 75%, which allowed developers to have test results faster and allows QA to have build artifacts to test faster.\"\n\n\"Automation within the CI/CD pipeline (including test automation and the actual CD automation part) has significantly increased the delivery speed of our team.\"\n\nOne developer shared something that really resonated with us. In the pre-CI/CD world the developer had to submit a ticket to seven different departments before \"button press\" (deployment), a process that used to take six weeks. Now with automation, it takes just two hours.\n\n## Off the list\n\nWith all the changes brought by CI/CD we wondered what developers no longer have to do in order to release code. It's safe to say it was a long list! The number one change was no longer needing to do manual testing, followed closely by dropping manual deployments.\n\n\"There's no need to manually merge my code and push to staging and then production.\"\n\n\"(We don't have to) sync the code between multiple Devs – Git does it well.\"\n\n\"(I no longer have to) manually test, argue about code style, and update dependencies.\"\n\n\"We don't have to code our product to work with different platforms. We can just code our product and integrate it with a tool to work with different platforms.\"\n\n\"I never create a ticket to ask Ops to deploy.\"\n\nDevs aren't the only ones not doing things they used to. Operations team members also reported radically changing roles. 
Nearly 40% said their development lifecyle is \"mostly\" automated, meaning they're free now to tackle different responsibilities. Over half of them are managing cloud services, while 42% said they're now primarily managing hardware and infrastructure.\n\nThis is how they described their roles today:\n\n\"We build out and improve the CI/CD platform.\"\n\n\"I'm a Jack of all trades.\"\n\n\"Right now it's 60% new project work and 40% operations/fire-fighting/developer support.\"\n\n\"We ensure reliability and availability, improve developer efficiency, automation, tools, and observability.\"\n\n\"I keep the lights on.\"\n\n\"(I'm responsible for) anything between dev and ops. From planning to deployment but not monitoring and maintaining apps in production.\"\n\n## Lines are blurring\n\nSo at the end of the day what do these DevOps-driven changes mean for the software development lifecycle? For starters, roles are blurring. Over one-third of developers told us they define and/or create the infrastructure their apps run on and 14% monitor and respond to that infrastructure – both of these tasks were traditionally the responsibility of the operations team. In fact, nearly 70% of ops pros said their developers were able to provision their own environments.\n\nDev and ops roles are starting to converge but at the same time developers are doubling down on tasks they consider critical to improving code quality (and thus the speed of code release). Just shy of 50% of developers told us they are now conducting [code reviews](https://docs.gitlab.com/ee/development/code_review.html) weekly but a growing body of anecdotal evidence – based on write-in responses – show that for many teams daily code reviews are a reality, something that would not have been possible if they were bogged down with manual testing and deployments.\n\nGoing forward, the \"free time\" created by [CI/CD automation](https://docs.gitlab.com/ee/topics/autodevops/) won't go to waste, developers told us. 
A majority want to push their teams to do way more testing of all types (functional, A/B, unit, security) and of course to automate those processes.\n\nWhat should you be doing that you're not doing now?\n\n\"We want to shift left on testing.\"\n\n\"We want to write more test cases to cover 100% of everything.\"\n\n\"We want better code reviews, faster code reviews and more code reviews.\"\n\n\"We should be doing everything better.\"\n\n**Read more about CI/CD and DevOps:**\n\n- Just getting started? Get our [CI/CD guide for beginners](/blog/beginner-guide-ci-cd/)\n\n- [The pain (and promise) of code reviews](/blog/beginner-guide-ci-cd/)\n\n- [Why there is never enough testing](/blog/what-blocks-faster-code-release/)\n\nCover image by [Jason Wong](https://unsplash.com/@jasonhk1920) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[109,9,681],{"slug":1851,"featured":6,"template":686},"ci-cd-changing-roles","content:en-us:blog:ci-cd-changing-roles.yml","Ci Cd Changing Roles","en-us/blog/ci-cd-changing-roles.yml","en-us/blog/ci-cd-changing-roles",{"_path":1857,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1858,"content":1864,"config":1869,"_id":1871,"_type":14,"title":1872,"_source":16,"_file":1873,"_stem":1874,"_extension":19},"/en-us/blog/ci-cd-github-extended-again",{"title":1859,"description":1860,"ogTitle":1859,"ogDescription":1860,"noIndex":6,"ogImage":1861,"ogUrl":1862,"ogSiteName":670,"ogType":671,"canonicalUrls":1862,"schema":1863},"We're extending free usage of CI/CD for GitHub for another six months!","Get another six months' use of CI/CD for GitHub on GitLab.com, free of charge.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666262/Blog/Hero%20Images/default-blog-image.png","https://about.gitlab.com/blog/ci-cd-github-extended-again","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're extending free usage of CI/CD for GitHub for another six 
months!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2019-09-09\",\n      }",{"title":1859,"description":1860,"authors":1865,"heroImage":1861,"date":1866,"body":1867,"category":299,"tags":1868},[723],"2019-09-09","\n\n[CI/CD for GitHub](/solutions/github/) allows you to host your code on GitHub while taking advantage of GitLab for CI/CD. In fact, [CI/CD for external repos](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/) lets you use any Git repo as a host together with GitLab CI/CD.\n\nWhen we first released the ability to use GitLab CI/CD with other Git repositories we placed it into our [Premium tier](/pricing/premium/) for GitLab Self-Managed. Normally, features go into a corresponding pricing tier on GitLab.com but we believed this was a specific case where we should offer a feature for free on GitLab.com because of the amount of repos on GitHub.com. Not knowing how long we'd keep this pricing, we set a deadline of one year. When that time came, we extended for six months.\n\nToday, we are extending the deadline for using CI/CD for external repos, including CI/CD for GitHub again. Now you'll have until **Mar. 22, 2020** to use these capabilities with private repos (see below for open source) as a [Free or Bronze](/pricing/) user on GitLab.com. This feature will continue to be part of the [Premium tier](/pricing/premium/) for GitLab Self-Managed.\n\n## Always free for open source\n\nThis extension applies to private repos hosted on GitLab.com. As part of our commitment to open source, [public projects get all the features of Gold for free](/solutions/open-source/). GitLab CI/CD for GitHub works by automatically mirroring your repos to GitLab.com. 
As such, if you have a public project on GitHub, it will also be public on GitLab so you can always take advantage of GitLab CI/CD for public projects.\n\n## Why we're extending the offer\n\nFor a rationale on our extension see our previous blog post when we [first extended external CI/CD](/blog/six-more-months-ci-cd-github/). When it came to the current deadline we found that the reasoning still held true and [decided to extend again](https://gitlab.com/gitlab-org/gitlab-ee/issues/13065).\n\nAs always, we'd love your feedback in the comments below.\n",[109,9,267,682],{"slug":1870,"featured":6,"template":686},"ci-cd-github-extended-again","content:en-us:blog:ci-cd-github-extended-again.yml","Ci Cd Github Extended Again","en-us/blog/ci-cd-github-extended-again.yml","en-us/blog/ci-cd-github-extended-again",{"_path":1876,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1877,"content":1882,"config":1887,"_id":1889,"_type":14,"title":1890,"_source":16,"_file":1891,"_stem":1892,"_extension":19},"/en-us/blog/ci-minutes-for-free-users",{"title":1878,"description":1879,"ogTitle":1878,"ogDescription":1879,"noIndex":6,"ogImage":1861,"ogUrl":1880,"ogSiteName":670,"ogType":671,"canonicalUrls":1880,"schema":1881},"Changes to CI pipeline minutes for new free users","This change better aligns to GitLab's buyer-based open-core model.","https://about.gitlab.com/blog/ci-minutes-for-free-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Changes to CI pipeline minutes for new free users\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2020-03-18\",\n      }",{"title":1878,"description":1879,"authors":1883,"heroImage":1861,"date":1884,"body":1885,"category":726,"tags":1886},[1609],"2020-03-18","___Update October 8, 2024: This blog is superseded by the blog post announcing [upcoming changes to CI/CD minutes for free tier users on 
GitLab.com](https://about.gitlab.com/blog/ci-minutes-update-free-users/). Please refer to our [pricing page](https://about.gitlab.com/pricing/) for the full breakdown of usage limits per tier.___\n\nEffective Sunday March 15, 2020, UTC, we are making changes to the CI pipeline minutes offered to *new* free users.\nMoving forward, all free accounts will have 2000 pipeline minutes per group per month independent of the visibility of the project.\nExisting free users will not have their plans changed.\n\n## What are pipeline minutes?\n\nAs we share on our [pricing page](https://about.gitlab.com/pricing/),\n\n> Pipeline minutes are the execution time for your pipelines on our shared runners. \n> Execution on your own runners will not increase your pipeline minutes count and is unlimited.\n\nPipeline minutes are a crucial part of what makes GitLab special.\n\n## Where did this come from?\n\nOne of our core values at GitLab is [efficiency](https://handbook.gitlab.com/handbook/values/#efficiency).\nAs GitLab grows and matures as both a company and a product, we've [really focused on becoming a more efficient company](https://youtu.be/wrnWaYS7Fgo?t=275). \nThis includes making sure we're being efficient in our CI offerings. \n\nAs we work on some new improvements to CI, including Windows and MacOS runners, we evaluated usage by free users.\nFrom an internal analysis, we found that 95% of free users who used CI minutes in January 2020 used fewer than 1000 CI minutes.\nBut we're not talking about 1000 minutes, we're talking about twice that. \nAnd 2000 minutes is a lot of minutes.\nThat's over an hour of CI minutes every day. 
\n\nWe are constantly working to provide the most value to our community through GitLab.\nThe best way we can do that is by strengthening our open source offering, including [make any features open source that are eligible to be open source](/company/pricing/#if-a-feature-can-be-moved-down-do-it-quickly).\n\n## What if that's not enough minutes?!\n\nIf 2000 minutes isn't enough, free users can buy [additional CI minutes](https://docs.gitlab.com/ee/subscriptions/#purchasing-additional-ci-minutes).\n\nAlternatively, you can bring your own runners. \nYou can [run specific runners for any of your projects](https://docs.gitlab.com/runner/). \nWe only count minutes on the shared runners we provide on GitLab.com.\n\n## What's next?\n\nExcited about all the cool things that can be done with CI?\nMe too!\n\nHere are some other things coming down the pipeline in the next couple of releases:\n\n* [Dynamic child pipeline creation via artifact includes](https://gitlab.com/gitlab-org/gitlab/-/issues/35632)\n* [Autoscaling GitLab CI jobs on AWS Fargate (MVC)](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2972)\n* [Notifications for when pipelines are fixed](https://gitlab.com/gitlab-org/gitlab/-/issues/24309)\n",[109,9,267,682],{"slug":1888,"featured":6,"template":686},"ci-minutes-for-free-users","content:en-us:blog:ci-minutes-for-free-users.yml","Ci Minutes For Free Users","en-us/blog/ci-minutes-for-free-users.yml","en-us/blog/ci-minutes-for-free-users",{"_path":1894,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1895,"content":1900,"config":1905,"_id":1907,"_type":14,"title":1908,"_source":16,"_file":1909,"_stem":1910,"_extension":19},"/en-us/blog/ci-minutes-update-free-users",{"title":1896,"description":1897,"ogTitle":1896,"ogDescription":1897,"noIndex":6,"ogImage":1861,"ogUrl":1898,"ogSiteName":670,"ogType":671,"canonicalUrls":1898,"schema":1899},"Upcoming changes to CI/CD minutes for free tier users on GitLab.com","The reduction of CI/CD minutes aligns with the 
majority of free user usage","https://about.gitlab.com/blog/ci-minutes-update-free-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Upcoming changes to CI/CD minutes for free tier users on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2020-09-01\",\n      }",{"title":1896,"description":1897,"authors":1901,"heroImage":1861,"date":1902,"body":1903,"category":726,"tags":1904},[1609],"2020-09-01","\nAt GitLab, we’ve been actively working towards empowering our community to make DevOps a reality for teams of all sizes. We’ve constantly [moved features down](/blog/new-features-to-core/) to our free product to enable more users to benefit from it. The [lower tiers offer more relative value](/company/pricing/#lower-tiers-have-more-relative-value) and help to get more users access to a complete DevOps platform.\n\nAs a result, the usage of GitLab has grown significantly over time to an estimated [30 million registered users](/why-gitlab/) - of which almost 6 million GitLab.com users are on our GitLab.com free tier. While we are excited by this exponential growth, our underlying costs to support this growth have increased significantly. As GitLab matures as both a company and a product, we must focus on becoming a more efficient company.\n\nWe evaluted CI/CD minute usage and found that 98.5% of free users use 400 CI/CD minutes or less per month. 
By lowering the current monthly usage limit, we are not only aligning the CI/CD minute limits with usage and related tier prices, but ensuring we can continue to maintain our commitment to offer a free GitLab.com tier.\n\n## Changes to the GitLab.com Free tier\n\nEffective October 1, 2020, we are reducing CI/CD minutes to 400 minutes per top-level group (or personal namespace) per month on the Free tier of GitLab.com.\n\n|    | Free | Bronze | Silver | Gold |\n| -- | ---- | ------ | ------ | ---- |\n| Price | $0 | $4 | $19 | $99 |\n| CI/CD Minutes | 400 | 2,000 | 10,000 | 50,000 |\n\n## Check and reduce CI/CD minutes used\n\nCI/CD minute usage can be reduced in a number of ways, including [bringing your own runners](https://docs.gitlab.com/runner/).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/GrO-8KtIpRA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nFor more details on the changes and how to manage and reduce your CI/CD minutes usage, please visit the [customer FAQ](/pricing/faq-compute-credit/).\n\n## Options to increase CI/CD minutes available\n\nIf 400 minutes is not enough, you can purchase additional CI/CD minutes at $10 per 1000 minutes or upgrade to [a paid tier](/pricing/). Also, you can bring your own runners. You can [run specific runners for any of your projects](https://docs.gitlab.com/runner/). We only count minutes on the shared runners we provide on GitLab.com.\n\nGitLab also offers Gold tier capabilities and 50,000 minutes per group per month CI/CD minutes for our [Open Source](/solutions/open-source/join/), [Education](/solutions/education/), and [Startups](/solutions/startups/) programs. 
If you are eligible for these programs, consider applying through their relevant program pages.\n\n## CI/CD minute limits will remain unchanged for Open Source, Education and Startups programs\n\nCI/CD minute limits will **remain unchanged** for members of our GitLab for [Open Source](/solutions/open-source/join/), [GitLab for Education](/solutions/education/), and [GitLab for Startups](/solutions/startups/) programs and will continue to match our [Gold tier](/pricing/). For more information on these programs and how to apply, please visit the relevant program pages.\n\n## More information\n\nPlease refer to the [customer FAQ](/pricing/faq-compute-credit/) for more information.\n\nTo address your questions and feedback, we have created a space in the [GitLab Community Forum](https://forum.gitlab.com/t/ci-cd-minutes-for-free-tier/40241), which is actively monitored by GitLab Team members and Product Managers involved with this change.\n",[109,9,267,682],{"slug":1906,"featured":6,"template":686},"ci-minutes-update-free-users","content:en-us:blog:ci-minutes-update-free-users.yml","Ci Minutes Update Free Users","en-us/blog/ci-minutes-update-free-users.yml","en-us/blog/ci-minutes-update-free-users",{"_path":1912,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1913,"content":1919,"config":1925,"_id":1927,"_type":14,"title":1928,"_source":16,"_file":1929,"_stem":1930,"_extension":19},"/en-us/blog/ciso-secure-next-gen-software",{"title":1914,"description":1915,"ogTitle":1914,"ogDescription":1915,"noIndex":6,"ogImage":1916,"ogUrl":1917,"ogSiteName":670,"ogType":671,"canonicalUrls":1917,"schema":1918},"Securing next generation software","Scale your security efforts by understanding and integrating with the DevOps workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673038/Blog/Hero%20Images/ciso-secure-next-gen-software.jpg","https://about.gitlab.com/blog/ciso-secure-next-gen-software","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Securing next generation software\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2020-01-27\",\n      }",{"title":1914,"description":1915,"authors":1920,"heroImage":1916,"date":1922,"body":1923,"category":679,"tags":1924},[1921],"Cindy Blake","2020-01-27","\nNext generation software has changed the way developers work, allowing them to \nproduce code quickly and at scale. This poses new security challenges \nhowever and all too often security is treated as a bolt-on task at the end of the \nprocess. Approaching security in this manner won’t scale to the size and \nvelocity of software development. It’s therefore critical that security \ninnovation finds its way into your development lifecycle. You can be sure \nthat your cyber-adversaries aren’t using hacking methods from 10 years ago – \nso why should you be using security technologies and methods from 10 years ago?\n\nTo tackle these changes, CISOs will need to understand three critical shifts in \nnext-generation software: \n\n1. How software is composed and executed\n1. How software is delivered and managed\n1. How software complies with regulatory requirements\n\nIt’s time to think of security as an outcome from an integrated DevSecOps effort.\n\nIn my recent book ([free to download here](/resources/ebook-ciso-secure-software/)) \nI explain these three shifts in depth to help security professionals understand \nnew application-related attack surfaces and areas of risk, how DevOps processes \nand tools affect their security efforts, and how security teams can adapt and \nscale to unite the iterative development and security workflows. \n\n## Secure software in the age of DevOps\n\nSecuring the software development lifecycle has never been easy, \nand efficiency-boosting development changes have created more challenges for \nsecurity teams to face. 
To be successful, CISOs and their teams need to be \nable to focus on:\n\n* Basic security hygiene\n* Monitoring, detection, and automated response\n* Building on standardization, policy automation, validation, common controls, \nand continuous improvement\n\n## Think it through\n\nAt the end of my book, you’ll find 10 steps to take as you work toward your \nnext generation security program. Here is a quick preview of a few of the steps:\n\n1. Start by assessing where you are, and decide on a path to move forward. \n1. Align metrics to manage risks, not silos. \n1. Go broad, not deep, when testing software. \n1. Apply continuous security scanning to iterative development.\n1. Apply Zero Trust principles to your applications and their infrastructure.\n\nCover image by [theverticalstory](https://unsplash.com/@theverticalstory) on [Unsplash](https://unsplash.com/photos/LjkEdYv55bA)\n{: .note}\n",[875,9,683],{"slug":1926,"featured":6,"template":686},"ciso-secure-next-gen-software","content:en-us:blog:ciso-secure-next-gen-software.yml","Ciso Secure Next Gen Software","en-us/blog/ciso-secure-next-gen-software.yml","en-us/blog/ciso-secure-next-gen-software",{"_path":1932,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1933,"content":1939,"config":1944,"_id":1946,"_type":14,"title":1947,"_source":16,"_file":1948,"_stem":1949,"_extension":19},"/en-us/blog/cloud-adoption-roadmap",{"title":1934,"description":1935,"ogTitle":1934,"ogDescription":1935,"noIndex":6,"ogImage":1936,"ogUrl":1937,"ogSiteName":670,"ogType":671,"canonicalUrls":1937,"schema":1938},"Cloud strategy and adoption roadmap for businesses","Everything you need to know for transforming your business to the cloud and how to plan out the perfect strategy for it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680891/Blog/Hero%20Images/cloud-adoption-roadmap.jpg","https://about.gitlab.com/blog/cloud-adoption-roadmap","\n                        {\n        \"@context\": \"https://schema.org\",\n  
      \"@type\": \"Article\",\n        \"headline\": \"Cloud strategy and adoption roadmap for businesses\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-12-05\",\n      }",{"title":1934,"description":1935,"authors":1940,"heroImage":1936,"date":1941,"body":1942,"category":679,"tags":1943},[788],"2019-12-05","\n\nOrganizations continue to focus on scalability and growth, and cloud can be a valuable asset in those strategies. When it comes to cloud adoption, not every organization takes the same path – some already work in multiple clouds, some work in industries with strict compliance standards, and some may only be starting their cloud journey.\n\nIt’s estimated that investments in infrastructure to support cloud computing account for [more than a third of all IT spending](https://www.zdnet.com/article/top-cloud-providers-2019-aws-microsoft-azure-google-cloud-ibm-makes-hybrid-move-salesforce-dominates-saas/), and teams want to make sure they’re investing in the right things that benefit them for the long-term. In order to implement the best cloud strategies to meet your needs, a cloud adoption roadmap should have four key steps:\n\n*   Assessment\n*   Planning\n*   Implementation\n*   Optimization\n\n## What is cloud adoption?\n\nCloud adoption aims to mitigate risk, reduce costs, and gain organizational scalability of database capabilities. “The cloud” refers to software and services that live and operate online rather than in an on-premise network of servers or local computers, and it creates incomparable flexibility in daily operations. The cloud provides the necessary speed for an organization to launch new releases quickly, securely, and efficiently.\n\n## Assessing cloud-readiness\n\nCloud adoption does not guarantee scalability and growth on its own. 
In order for cloud implementation to be successful, organizations have to identify challenges and expertise gaps that affect their cloud-readiness. For example, a lift-and-shift to the cloud isn’t going to produce great results if existing business applications are monolithic and/or outdated. In this scenario, companies will need to commit to an [application modernization](/blog/application-modernization-best-practices/) strategy that makes a cloud investment worthwhile.\n\nFor large enterprises currently working in a traditional IT environment, there may be internal barriers such as lack of organizational buy-in, reluctance to invest the required resources in a multiyear effort, or even regulatory and compliance restraints. On average, [enterprise adoption remains low at around 20%](https://www.mckinsey.com/business-functions/mckinsey-digital/our-insights/cloud-adoption-to-accelerate-it-modernization), so it may be beneficial for these organizations to adopt cloud’s [agile](/solutions/agile-delivery/) and automated operating model within their traditional IT, at least in the short-term.\n\nAs part of the assessment stage of your cloud adoption roadmap, ask yourself the following questions:\n\n1. Do we have the internal expertise necessary for a cloud migration or will we need to implement an education or hiring plan?\n2. Do we have industry requirements or compliance regulations to consider?\n3. Will we need to tackle legacy application modernization as well?\n4. What strategies have worked for similar companies in similar industries?\n\n## Creating a plan\n\nAfter identifying challenges and opportunities to cloud adoption, the work on an implementation plan begins. 
Whether it’s a cloud transformation or just migrating from one cloud to another, it’s important to create actionable steps and have the right leadership in place to guide the process.\n\nKeeping your assessment in mind, your organization will need to decide which clouds and cloud models work best for your needs and business goals. You’ll need to evaluate public, private, or hybrid clouds, in addition to SaaS, IaaS and PaaS cloud models, to determine which combination fits within your limitations. Having leaders with expertise in these areas of cloud computing, rather than relying on information from the cloud service providers themselves, will ensure that decisions are unbiased with your unique needs in mind.\n\n![cloud models](https://about.gitlab.com/images/blogimages/cloud_models.png){: .shadow.medium.center}\n\nBut what if you want to use multiple cloud providers? This is where a [multicloud](/topics/multicloud/) approach can be beneficial.\n\n### What is multicloud?\n\nMulticloud describes [how enterprises use multiple cloud providers to meet different technical or business requirements](https://www.zdnet.com/article/multicloud-everything-you-need-to-know-about-the-biggest-trend-in-cloud-computing/). At its core, multicloud is made possible through cloud-native applications built from containers using services and allows for multiple services to be managed in one architecture. Research indicates [85% of enterprises currently operate in multiple clouds](https://www.ibm.com/blogs/cloud-computing/2018/10/19/survey-multicloud-management-tools/).\n\n\n\nDuring the planning phase of your cloud roadmap, consider the following:\n\n1. Do we have internal expertise to make sure we’re making the right decisions?\n2. Have we evaluated the different cloud models?\n3. 
Would a multicloud approach be a good fit?\n\n## Putting plans into action\n\nThe implementation phase usually requires multiple steps and thrives when teams are able to communicate and collaborate with each other. As plans change (and they inevitably will), high visibility ensures teams can adapt.\n\nIn our recent migration from Azure to GCP, we documented our progress publicly and leaned on three of our [core values](https://handbook.gitlab.com/handbook/values/): efficiency, iteration, and transparency. We believe in taking small steps and looking for the most [boring solutions](https://handbook.gitlab.com/handbook/values/#boring-solutions) because that allows us to get feedback quickly and reduce cycle times. Whether migrating to the cloud for the first time, or just moving from one cloud to another, things rarely ever go smoothly. By practicing iteration, we were able to course correct and come up with the right solutions quickly. Learn how we put our values into action and watch our presentation at Google Cloud Next ‘19.\n\n[GitLab’s journey from Azure to GCP](/blog/gitlab-journey-from-azure-to-gcp/)\n{: .alert .alert-gitlab-purple .text-center}\n\nWhen implementing cloud strategies, expect your approach to DevOps to change as well.\n\n[DevOps](/topics/devops/) is all about developers and operations working together and using the cloud as a common language, and [cloud native app development](/topics/cloud-native/) will require a shift to a DevOps operating structure. Once you’ve decided on the cloud service and deployment models in your adoption roadmap, you’ll also need to evaluate which DevOps tools support your cloud initiatives. [Developer tools have a high capacity for driving cloud usage](/blog/gitlab-ci-cd-is-for-multi-cloud/) because once you have your application code hosted, the natural next step is finding a place to deploy it. 
For example, if you decided during the planning phase to adopt multicloud, having cloud-agnostic tools will play a big role in the success of that strategy.\n\nDuring the implementation phase of your cloud roadmap, consider the following:\n\n1. Take small steps and practice iteration so you can course correct effectively.\n2. Make sure teams have visibility into the cloud process and can collaborate as things progress.\n3. Ensure your DevOps structure will be able to support your cloud and cloud native application development initiatives.\n4. Evaluate developer tools and consider if cloud-agnostic tools would allow more flexibility with multiple clouds.\n\n## Cloud optimization and beyond\n\nWhile there will inevitably be a point when cloud models and DevOps tools have been implemented, a cloud adoption roadmap is really a never-ending journey for continuous improvement. By the time a cloud adoption timeline has been completed, there will be new technologies and new paths for cloud optimization already on the horizon. A solution you implemented may need to be deprecated in favor of something that works a little better. A valuable part of iteration is making decisions and acting quickly, and that is a process that never ends.\n\nIn [Cloud Powers the New Platform Economy](https://www.forrester.com/report/Cloud+Powers+The+New+Platform+Economy/-/E-RES120506), Forrester explains that you must automate, integrate, and orchestrate all the moving parts of your cloud to keep up with the pace of innovation the cloud economy demands. As you continue to improve your cloud ecosystems, consider the following:\n\n1. Are we keeping up with the pace of innovation and how can we improve?\n2. Are we investing in next-generation skills and providing continuing education opportunities?\n3. Are we evaluating new technologies?\n4. 
Are we managing our cloud effectively?\n\n\nCover image by [Matt Howard](https://unsplash.com/@thematthoward?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/journey?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[9,1041],{"slug":1945,"featured":6,"template":686},"cloud-adoption-roadmap","content:en-us:blog:cloud-adoption-roadmap.yml","Cloud Adoption Roadmap","en-us/blog/cloud-adoption-roadmap.yml","en-us/blog/cloud-adoption-roadmap",{"_path":1951,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1952,"content":1958,"config":1965,"_id":1967,"_type":14,"title":1968,"_source":16,"_file":1969,"_stem":1970,"_extension":19},"/en-us/blog/cloudhealth-and-gitlab-reducing-overruns",{"title":1953,"description":1954,"ogTitle":1953,"ogDescription":1954,"noIndex":6,"ogImage":1955,"ogUrl":1956,"ogSiteName":670,"ogType":671,"canonicalUrls":1956,"schema":1957},"How to prevent deployments from overrunning your budget","Guest authors from VMware share how to include budget and resource checking into your continuous deployment with Cloudhealth and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670389/Blog/Hero%20Images/gitlab-cloud-journey.png","https://about.gitlab.com/blog/cloudhealth-and-gitlab-reducing-overruns","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to prevent deployments from overrunning your budget\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Davis\"},{\"@type\":\"Person\",\"name\":\"Bahubali (Bill) Shetti\"}],\n        \"datePublished\": \"2019-08-26\",\n      }",{"title":1953,"description":1954,"authors":1959,"heroImage":1955,"date":1962,"body":1963,"category":679,"tags":1964},[1960,1961],"Tim Davis","Bahubali (Bill) Shetti","2019-08-26","\n\nManaging deployments is a complex task and DevOps admins generally consider it a victory when a 
deployment is\nachieved and somewhat repeatable. Unfortunately this process doesn't give DevOps admins time to\nconsider the impact of the outcome on the larger operations pipeline. We know the importance of\n[Continuous Verification](https://thenewstack.io/continuous-verification-the-missing-link-to-fully-automate-your-pipeline/)\n– it's just one of several day-two operations and best practices that need to be brought into the\ncontinuous deployment (CD) process to achieve efficiencies. But we also need to look at the budget.\n\n## Adding budget and resource checking into your CD\n\nMost developers and DevOps admins don't consider the impact of their deployment on the budget. They\nalso don't generally check if sufficient resources in AWS exist prior to deployment because, after\nall, aren't there \"unlimited\" resources on AWS?\n\nAdding the proper budget and resource checks into the pipeline helps avoid:\n\n* Potential rollbacks and clean-up actions\n* Redeployment (\"lift and shift\") into other regions in AWS\n* Long analysis to pinpoint budget overruns\n\nNot having to deal with these tasks improves the DevOps admin's metrics, such as mean time to change (MTTC),\ndeployment time, etc., and subsequently efficiency goes up.\n\n## Understanding the policy\n\nPrior to implementing any of these checks, it’s important to understand the \"policy.\" While every\norganization is different, and the iterations of \"policy\" are endless, there are some basic checks\nthat should always be implemented:\n\n* Ensure the project-specific budget is not already overrun\n* Will this deployment exceed the project budget?\n* Is the project already over project-specific limits and restrictions? (i.e. 
cannot use RDS, or\ncan't have more than 10 EC2 instances in a deployment)\n* Will this deployment exceed the project-specific resource policy?\n\nWith these basic checks in place, at least some initial sanity is achieved during a pipeline execution.\nMore and more complex iterations can be added as more is learned about the project and processes are improved.\n\n## How do you do it?\n\nRegardless of the policy complexity, implementing these checks can be easily accomplished with\nstandard off-the-shelf tools like [CloudHealth by VMware](https://cloudhealthtech.com) and [GitLab](/).\n\n* CloudHealth by VMware allows you to define \"perspectives\" specific to your project, create governance\nrules, and access this information through an API for easy integration into any CI/CD tool.\n* GitLab allows you to easily add in scripts and/or pre-built code (containers) enabling\nany possible check against any potential external system.\n\nIn order to highlight how to implement this type of check into the CI/CD pipeline, we've\ndelivered an [example configuration](https://cloudjourney.io/articles/multicloudops/budget_check_cicd-td/)\nusing both CloudHealth and GitLab. We hope this provides a nice baseline to build from.\n\n![CD WITH A CH check from GitLab CI/CD pipelines](https://about.gitlab.com/images/blogimages/glcdpipeline.png){: .shadow.medium.center}\n\n## In summary\n\nAlthough we've provided a baseline that we hope can be used for more complex policy checks in CD,\nconvincing DevOps admins to implement this is another problem. 
Improving metrics should provide\nan incentive for DevOps admins but it is not sufficient for them to simply add budget and resource checks.\nWhile every enterprise has its own process and metrics, we recommend adding a budgetary efficiency\nmetric for DevOps admins.\n\nUsing the configuration above, it’s easy to add in CloudHealth to continuously check the project's\nbudget and utilization, and adding a DevOps budget metric will not only ensure that these checks\nare deployed but will also lead to more efficient deployments.\n\nIf you have any questions regarding this or any other issue, feel free to reach out\nto us [@cloudjourneyio](https://twitter.com/cloudjourneyio) on Twitter!\n\n### About the guest authors\n\n_Bahubali (Bill) Shetti is the director of public cloud solutions for VMware Cloud Services at VMware.\nHe leads a team of cloud architects that evangelize and develop solutions for improving public cloud\noperations (AWS/Azure/GCP). Bahubali was part of the initial team that developed and launched\nVMware Cloud Services. Previous to VMware, he was director of product management at VCE\n(now Dell) for Cloud Management Products. Between 2011-2014, Bahubali led operations at Cumulus\nNetworks, led AWS cloud operations at several startups, and headed an open source routing\nsoftware project. Between 2008-2010, Bahubali led the cloud investment practice at Storm Ventures.\nHe spent 9 years at Cisco in product management and business development. He holds an M.S. in\nInformation Networking from Carnegie Mellon and a B.S. in Electrical Engineering from Rutgers._\n\n_Tim Davis is a cloud advocate at VMware where he focuses on public cloud operations and cloud native\napplications. He provides consulting guidance to a wide range of customers on these topics and\nprovides a bridge between customers and product teams at VMware. He also works to evangelize\nnative cloud usage including AWS, Azure and GCP. 
Prior to his current role, he was a specialist systems\nengineer focused on VMware’s Networking and Security product line. Before VMware, Tim worked as a\nconsultant and VMware architect at Dell Services, which was one of the largest contracts held at\nthe time. His background is in operations/management and architecture. He holds numerous\nindustry certifications including from VMware and Amazon Web Services._\n",[109,1041,9,231],{"slug":1966,"featured":6,"template":686},"cloudhealth-and-gitlab-reducing-overruns","content:en-us:blog:cloudhealth-and-gitlab-reducing-overruns.yml","Cloudhealth And Gitlab Reducing Overruns","en-us/blog/cloudhealth-and-gitlab-reducing-overruns.yml","en-us/blog/cloudhealth-and-gitlab-reducing-overruns",{"_path":1972,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1973,"content":1979,"config":1985,"_id":1987,"_type":14,"title":1988,"_source":16,"_file":1989,"_stem":1990,"_extension":19},"/en-us/blog/code-counting-in-gitlab",{"title":1974,"description":1975,"ogTitle":1974,"ogDescription":1975,"noIndex":6,"ogImage":1976,"ogUrl":1977,"ogSiteName":670,"ogType":671,"canonicalUrls":1977,"schema":1978},"Lightning fast code counting for better code management intelligence","Knowledge of your code composition can come through simple counting of lines of code per language.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682614/Blog/Hero%20Images/noaa-PkHsrwNOfBE-unsplash.jpg","https://about.gitlab.com/blog/code-counting-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Lightning fast code counting for better code management intelligence\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-02-15\",\n      }",{"title":1974,"description":1975,"authors":1980,"heroImage":1976,"date":1981,"body":1982,"category":791,"tags":1983},[1239],"2023-02-15","\n\nOne of the earliest forms of 
intelligence was to simply answer the question “How many?”. Counting is one of the first things that we learn as a child. As we grow older, we come to see this deceptively simple concept as somewhat childish. Yet, upon the concept of counting, the entire discipline of statistics is founded. In turn, every discipline that benefits from statistics owes a debt of gratitude to the very humble concept of counting. \n\nMany of the massive data lakes we keep are essentially vast amounts of counting. Using artificial intelligence to analyze this data, we frequently find insights we were not expecting. So it would seem that counting is somewhat of a fractal concept – it’s deceptively simple, but, when compounded, generates delightful things.\n\nSo if we have a thing we are trying to be more intelligent about, our first endeavor might be to count it. Let’s see how to apply that to our code stored in GitLab.\n\n### Why developers count code\n\nThe following list is from real-world scenarios. Many of them are also asserted in Ben Boyter’s blog post [Why count lines of code?](https://boyter.org/posts/why-count-lines-of-code/). Their enumeration here is not an endorsement of the validity or accuracy of code counting for the claimed benefit and the fundamental assumptions of such models are not stated. Because code counting is essentially a form of modeling, it is also subject to George Box’s axiom: “All models are wrong, but some are useful.”\n\n- Showing the languages in a repository using an absolute metric like source lines of code helps to quickly assess if one can contribute to the project, given their own talents. \n- Cost assessment for anything which charges by “lines of code” (some code scanning and development tools may charge this way).\n- Although [research](https://gitlab.com/gitlab-org/gitlab/-/issues/371038) shows that lines of code are not a good metric for measuring contribution, some developers have gotten used to seeing lines of code per contributor. 
\n- Code base shrinkage as a measure of good architecture (simplification).\n- Anything where the complexity of code affects project agility and costs. For instance, assessing and reporting status on migrating a code base to a new language. \n- Staffing a development team – understanding what language competencies are needed across the team and in what relative proportion to each other or understanding that for the entire organization’s codebase.\n- IT tooling decisions to support the needs of an organization given the most used coding languages across all repositories in the org.\n- Assessment of tech debt.\n\nWhile it is easy to create bad models with any of the above counts, the focus of this post is to get some good counts from which you can carefully build a model.\n\n### Toolsmithing GitLab CI: A working example as a shared CI library\n\nThe easiest way to differentiate between a “toolized” and “templated” solution is that you can simply and easily reuse this exact code without needing to change it. Many formal coding languages have the concept of shared libraries or dependencies that are essentially toolized. A templated solution consists of a starting point that you customize and then have to manage the code yourself. These can function as scaffolding for a starting point for an entire project or snippets of code that do a specific function. The fundamental difference is that when you use a template, you end up owning and managing the resultant code going forward.\n\nIn [GitLab CI](https://docs.gitlab.com/ee/ci/introduction/index.html), we can create our own tooling or dependencies with a few tricks stolen from [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/). These tricks are:\n\n1. Use includes to reference the shared managed library code (this creates a “dependency” on code that is being managed outside of your own).\n2. 
The includable code must be written like a function where all needed inputs are either passed in, or can be collected from the environment. No hard coding is allowed because that means you’ve created a template which can’t be depended upon directly.\n3. Use GitLab CI ‘functions’. I am coining this term to indicate that in GitLab you can precede a job name with a “.” (dot) and it will not be executed when it is read. Then you can create a new job using all the code in the “dot named” job and add variables by using the `extends:` command keyword. By using dot named jobs in your includable code, the developer consuming the “managed shared CI dependency” can decide when, where, and how to call the toolized code.\n\n### The result: A code-counting GitLab CI extension\n\nHere are some of the final design attributes of this code counting solution:\n\n- Is extremely fast for the given task.\n- Leverages the Git clone optimizations lessons contained in this article: [How much code do I have? A DevSecOps story](https://acloudguru.com/blog/engineering/how-much-code-do-i-have-a-devsecops-story).\n- Uses the [lightning fast, open source code counting tool SCC](https://github.com/boyter/scc) by Ben Boyter.\n- Is implemented as a reusable GitLab CI shared library extension.\n- Allows configuration of the file extensions that should not be checked out because they do not include source code to be counted.\n- Leverages the GitLab Run Pipeline forms capability.\n- Can enumerate and count an entire group hierarchy in GitLab, or be given a stipulated list.\n- Uses the runner token to access and read repositories by default, but can be given a specific token.\n- Uploads HTML and text artifacts that contain the code counting report.\n- Purposely emits the code counting results into CI logs for easy reference.\n\n### The output\n\nResults are shown below in the CI log but they are also captured as an HTML artifact.\n\nThe clone time is also in the log for each project so that it can be 
verified that the cloning optimizations are making a substantial difference.\n\nThese particular results are counting all the code in [https://gitlab.com/guided-explorations](https://gitlab.com/guided-explorations).\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/code-counting-in-gitlab/codecountingcilog.png)\n\n### The code\n\nThe code is available in this project: [https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc](https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc). You can view the scanning results in the job logs of past runs here: [https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc/-/pipelines](https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc/-/pipelines).\n\nRather than fail the entire job when a project fails to clone, the job simply logs the error from an attempted clone. This allows review of valid use cases for not being able to clone and obtains as complete of a picture as possible. The cloning error log is uploaded as a job artifact and emitted to the log.\n\n### Innovation: MR complexity metrics extension\n\nDuring a customer engagement I was asked whether there was a way to assess how much change a Merge Request contained and mark it. This was because an operations team was missing their SLAs for deployments due to the amount of change, and, therefore, risk and review could be highly variable. 
However, since there was no way to estimate this without human eyes on it, MRs with a high degree of change would overrun their SLA when they couldn’t be pre-triaged.\n\nI wondered if I could use the previously built code counting solution to count diffs and get a rough idea of how much change had occurred in the commits of an MR branch and then apply labels to MRs to give a rough idea of their degree of change as a sort of proxy for how much review time might be required.\n\nIt turned out to be plausible and you can review the [Shared Library in Git Diff Revision Activity Metrics CI EXTENSION](https://gitlab.com/guided-explorations/code-metrics/git-diff-revision-activity-metrics) and see the results in the MRs list of this working example project that uses that code: [MR list for Diff Revision Activity Analytics DEMO](https://gitlab.com/guided-explorations/code-metrics/diff-revision-activity-analytics-demo/-/merge_requests).\n\n### The value of remote work water cooler conversations\n\nI have to let you know why this blog was written now when this solution has been around for quite a while. You often hear about how working remotely does not allow for water cooler conversations, which in the story you’re told are where real innovation happens.\n\nWithin GitLab’s [Remote First culture](/company/culture/all-remote/guide/) it is expected that anyone in the company can schedule a “coffee chat” with anyone else. The cultural expectation is that this is normal and, unless you are getting an overwhelming number of these requests, that when asked, you will find time to socially connect.\n\nI received a coffee chat request from [Torsten Linz](https://gitlab.com/tlinz), the Senior Product Manager for the Source Code Management group, to chat about my comments and linking of a working example to an issue about code counting that he had become aware of. 
He also wanted to see if I could help get a copy of it working in his GitLab group.\n\nDuring that collaborative time, I discovered that my example was not working because of some major code changes in SCC and because it presumed the GitLab group to be enumerated did not need the counting job to authenticate to prove that it should have access to the projects. While we were collaborating, we fixed these problems and improved the solution to use the SCC binary, rather than depend on working Golang runtimes. After our collaborative session, as I tweaked some more, I did parameter documentation in README.md and debugged the ability to run it either with a group enumeration or a provided list of specific git repos.\n\nSo I owe big thanks to Torsten and to GitLab’s cultural support for remote first water cooler conversations for improving this working example to the point that it is worth sharing with a broader audience. If you’d like to know more, check out the GitLab handbook page: [Informal communication in an all-remote environment](/company/culture/all-remote/informal-communication/).\n\n_Cover image by [NOAA](https://unsplash.com/@noaa?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/lightning-fast?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[9,1984,976],"solutions architecture",{"slug":1986,"featured":6,"template":686},"code-counting-in-gitlab","content:en-us:blog:code-counting-in-gitlab.yml","Code Counting In 
Gitlab","en-us/blog/code-counting-in-gitlab.yml","en-us/blog/code-counting-in-gitlab",{"_path":1992,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1993,"content":1999,"config":2005,"_id":2007,"_type":14,"title":2008,"_source":16,"_file":2009,"_stem":2010,"_extension":19},"/en-us/blog/collaboration-communication-best-practices",{"title":1994,"description":1995,"ogTitle":1994,"ogDescription":1995,"noIndex":6,"ogImage":1996,"ogUrl":1997,"ogSiteName":670,"ogType":671,"canonicalUrls":1997,"schema":1998},"Improving DevOps with Better Communication & Collaboration","The most important skills for a DevOps pro? Collaboration and communication. We share some of our best blogs, articles, and videos to help you work better, together.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681779/Blog/Hero%20Images/chatbubble.jpg","https://about.gitlab.com/blog/collaboration-communication-best-practices","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Improving DevOps and software development with communication and collaboration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-11-23\",\n      }",{"title":2000,"description":1995,"authors":2001,"heroImage":1996,"date":1767,"body":2003,"category":679,"tags":2004},"Improving DevOps and software development with communication and collaboration",[2002],"Sara Kassabian","\n\nWe believe that the best software developers, companies, and products are those that embrace collaboration and transparency in communication, which is why we’ve compiled some of our best blog posts, articles, and videos about the topic in this blog collection.\n\nBut first, has your engineering team adopted a [DevOps](/topics/devops/) strategy? 
[Start here if you need help communicating why DevOps is the best approach](/blog/devops-stakeholder-buyin/) to stakeholders outside the engineering team.\n\n## What is DevOps collaboration?\n\nCollaboration is as important to DevOps as automation and nearly as hard to achieve. Software development was traditionally split into very different functions that didn’t work together; the advent of DevOps, bringing dev and ops together, was designed to change all of that. \n\n## Why collaboration in software development matters\n\nWe unpack three key reasons why collaboration is an essential skill for software developers.\n\n### 1. Your future as a software developer is bright if you embrace collaboration\n\nWhile some might consider teamwork and communication to be soft skills, the results of our [2020 DevSecOps](/developer-survey/) survey reveal a consensus among developers, security pros, ops team members, and testers that collaboration and communication are the most important skills for a DevOps professional.\n\n\"You can’t have one brain that knows it all,\" explains [Darwin Sanoy](/company/team/#DarwinJS), senior solutions architect, Americas, at GitLab. \"You need communication and collaboration to work together.\" Read more to learn about how to [brush up on soft skills to future-proof your DevOps career](/blog/future-proof-your-developer-career/) .\n\n### 2. The best way to practice collaborative software development? In open source communities\n\nGitLab is an open-core product with [open source and source-available code](/handbook/marketing/strategic-marketing/tiers/#open-source-vs-source-available). This means that community contributors can push changes to our open source codebase, and can view our proprietary, source-available code. Anyone who has been a part of an open source community can tell you that they’re very global, so you could be living in Mexico and [collaborating on an MR with someone in Poland](/blog/gitlab-hero-devops-platform/). 
Global collaboration without needing a passport is enriching and unique, but sometimes cultural differences can give way to miscommunication. The best way to embrace working in open source communities is to practice mindful communication and always [assume positive intent](https://handbook.gitlab.com/handbook/values/#assume-positive-intent). Most of the time, conflict is the result of misunderstanding, not malevolence.\n\nEarlier this year at [GitLab Commit Virtual](https://www.youtube.com/playlist?list=PLFGfElNsQthYQaTiUPQcu4O0O20WHZksz), we shared some communication hacks to help you seem approachable and invite dialog while contributing to open source communities. Watch the video below to learn all about it.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/XTBWX-evVEA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAnd while open source and security might seem like a strange coupling, we found that community contributions helped fortify our GitLab Secure product. [Read the blog post](/blog/integrating-with-gitlab-secure/) to learn more about how inviting contributions from our open source community helped users extend our product to suit their needs.\n\n### 3. Why can’t dev and sec be friends?\n\nTeamwork doesn’t always come easily, particularly when you’re on opposite sides of the DevOps lifecycle. 
While at GitLab, dev and sec teams do work well together, this isn’t the case on every engineering team.\n\n[Brendan O’Leary](/company/team/#brendan), senior developer evangelist at GitLab, and [Ethan Strike](/company/team/#estrike), security manager for Application Security, [talk candidly about their respective objectives and how it’s better to integrate security into the development process](/blog/developer-security-divide/), as opposed to tacking it onto the end.\n\n## Best practices for developers\n\nWe explain why code review and pair programming are two methods that help engineering teams ship more stable code.\n\n### Code reviews for all\n\nFast feedback is one of the pillars of collaborative software development practices, and code reviews are an essential component. Whether you’ve been coding for 10 years or 10 weeks, having more than one person review your work is critical for catching errors and shipping stable code. But that doesn’t mean code reviews are simple. [Read our blog post on the challenges of code review](/blog/challenges-of-code-reviews/) to learn tips on how to overcome the hurdles, and [watch the demo on how to use GitLab for code review](https://page.gitlab.com/resources-demo-scm.html). [Phil Hughes](/company/team/#iamphill), staff frontend engineer for the Create: Code Review team summarizes [four tips to make code review more efficient and less painful](/blog/efficient-code-review-tips/). But all in all, we believe that despite the challenges of code review, it’s absolutely worth any hassle.\n\nWhile you’re at it, [check out our blog post where we share some of our ideas about the future of merge requests and code review](/blog/future-merge-requests-realtime-collab/) with GitLab. 
Not all of the ideas will necessarily be implemented, but it will give you some insight as to our vision moving forward.\n\n### Use the buddy system\n\nPair programming is basically code review in real time, and it is also one of the pillars of [Agile software development](/solutions/agile-delivery/). Typically it is done with two programmers at the same workstation, but when you’re on a globally distributed team like we are at GitLab, that workstation exists in the virtual realm instead of IRL. In pair programming, one programmer creates the code (the driver) while the other person reviews the code (the navigator).\n\n>\"Programming is fairly abstract. When you have to explain a concept verbally, it often makes you realize you're missing pieces or that there are better ways to solve problems than your initial idea.\" – [Brandon Lyon](/company/team/#brandon_m_lyon), marketing web developer/designer\n\nThat’s not to say pair programming is the ideal workflow for everyone, one developer said that, as an introvert, pair programming is tiring. But one of the key benefits is that it speeds up the software development process and allows you to ship more stable code, faster. Read more about [the upsides and downsides to pair programming for Agile software development](/blog/agile-pairing-sessions/).\n\n## Best practices for collaboration on non-engineering teams\n\nThe tools and strategies you use to communicate may vary based on where you sit in your company, but there are a few best practices that engineering teams use that can be applied to non-engineering teams, such as pairing up on design, code production, and even writing projects. 
Check out some of our blog posts about [how to use GitLab for collaborative project management within and across teams](/blog/collaboration-in-product-planning/).\n\n*   [**How designers collaborate synchronously**](/blog/synchronous-collaboration-as-a-remote-designer-at-gitlab/): Pair designers, coffee chats with team members across GitLab, weekly UX showcases, calls with product designers and product managers, and other strategies.\n*   **How Marketing uses GitLab for project management**: In [part one](/blog/gitlab-for-project-management-one/), we explain why the architecture of GitLab is so effective for project management, even for users in non-technical roles. In [part two](/blog/gl-for-pm-prt-2/), we share some real-life examples of how we used GitLab for successful project management.\n\n### Other inventive ideas for collaboration\n\nIn a stroke of genius, our Support team recognized that the weekly team all-hands meeting was getting a bit dull, and decided to change up the format and distribute it as a [podcast instead](/blog/how-we-turned-40-person-meeting-into-a-podcast/). The podcast format allowed team members to listen to the weekly update asynchronously, which is an essential component of communication for a globally distributed team such as ours. This is a great example of how thinking outside the box can improve how information is disseminated.\n\n## Some challenges with DevOps collaboration\n\n- **Maintaining security.** Security and compliance are critical for successful DevOps, but these areas have traditionally been siloed, making collaboration tricky at best.\n- **Too many people on a project**. Large and busy teams can struggle with communication and collaboration.\n- **Lots of communication options.** Using email, instant messaging, tickets, Zoom recordings, and more to house project info can cause things to slip through the cracks. 
\n- **Dealing with different personality types and working styles.** Individual needs and preferences can vary wildly and it can be a struggle to keep everyone on the same page.\n\n## Want more information on collaborative software development?\n\nTrust us, you’ll want to [bookmark this page](/topics/version-control/software-team-collaboration/) so examples of best practices for collaboration are just a click away for the times when you’re feeling stumped or siloed.\n\n[Watch the webcast](/webcast/collaboration-without-boundaries/) to learn how to bring cross-functional teams together using GitLab to deliver more stable software, faster.\n\nCover image by [Volodymyr Hryshchenko](https://unsplash.com/@lunarts?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/V5vqWC9gyEU)\n{: .note}\n",[749,9],{"slug":2006,"featured":6,"template":686},"collaboration-communication-best-practices","content:en-us:blog:collaboration-communication-best-practices.yml","Collaboration Communication Best Practices","en-us/blog/collaboration-communication-best-practices.yml","en-us/blog/collaboration-communication-best-practices",{"_path":2012,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2013,"content":2019,"config":2026,"_id":2028,"_type":14,"title":2029,"_source":16,"_file":2030,"_stem":2031,"_extension":19},"/en-us/blog/compliance-made-easy",{"title":2014,"description":2015,"ogTitle":2014,"ogDescription":2015,"noIndex":6,"ogImage":2016,"ogUrl":2017,"ogSiteName":670,"ogType":671,"canonicalUrls":2017,"schema":2018},"How to build a compliance program with ease","Compliance audits should not cause headaches. 
Learn how to build compliance programs and carry out compliance audits effectively using GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667086/Blog/Hero%20Images/blog-compliance.jpg","https://about.gitlab.com/blog/compliance-made-easy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to build a compliance program with ease\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Saumya Upadhyaya\"},{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2020-07-02\",\n      }",{"title":2014,"description":2015,"authors":2020,"heroImage":2016,"date":2023,"body":2024,"category":679,"tags":2025},[2021,2022],"Saumya Upadhyaya","Dov Hershkovitch","2020-07-02","The implementation of a compliance program requires organizations to adopt processes that help comply with regulatory and legal requirements. GitLab makes it easy to wrestle the \"compliance beast\" but to understand what that really means it helps to take a look at this very complex and challenging area.\n\n## An effective compliance program: lots of moving parts\n\nCompliance processes are often costly, manual and cumbersome to implement and maintain. Even organizations that are advanced in compliance maturity still maintain compliance processes within spreadsheets, file storage systems (such as Google Drives or Dropbox) and emails, making wading through the documentation required to prove compliance extremely painful.\n\nFurther compounding this pain is the number of third party applications an organization uses to operate its business. The use of these tools and services adds complexity because they’re all subject to the underlying policies and procedures the company has established. This means auditing not just your own organization’s processes, but those of your vendors.\n\nHowever, compliance is essential. 
With regulatory scrutiny being high, increasing cyber security breaches and the high costs of non compliance manifesting in the form of revenue loss, business disruptions, fines, damage to brand image, impacted stock prices and so on - the need for compliance is not lost on organizations. In fact, non compliance penalties [can be much lower](https://www.justice.gov/opa/pr/antitrust-division-announces-new-policy-incentivize-corporate-compliance) when an organization can demonstrate the presence of an effective compliance program.\n\n## Why is achieving an effective compliance program so difficult?\n\nIn spite of organizations acknowledging the importance of compliance, achieving an effective compliance program seems elusive.\n\nCurrently, there is a lot of administrative overhead associated with compliance. The task that gives most compliance professionals a headache is finding the documentation or evidence they need. With most organizations still using a combination of spreadsheets, drives and emails to manage their compliance programs and the added complexity of demonstrating compliance within their third-party tools or services, it is increasingly difficult for compliance teams to scale.\n\nIt can be even more daunting trying to keep track of the growing regulatory compliance requirements and internal controls to manage these requirements. In the cases where organizations have introduced additional Governance, Risk and Compliance (GRC) tools within their organizations, these tools are not integrated into their development and operational tools - thereby creating yet another compliance silo.\n\nDevelopment and operations teams perceive compliance-related activities as slowing down their velocity, creating an inherent friction with the compliance teams, thereby making compliance processes even slower and less effective.\n\n## Building your compliance program\n\nAny well defined compliance program requires internal controls that allow:\n\n1. 
Defining rules and policies aligned with your organizational or regulatory/legal requirements\n1. Generating and maintaining the evidence of policy adherence\n1. Enforcing the defined rules and policies\n1. Demonstrating compliance with easy-to-access and readable reports and evidence artifacts\n1. Ongoing risk assessments to detect and mitigate gaps in compliance\n\nAny compliance program that does not bring together all of these controls incurs the administrative overhead of maintenance. Organizations often run the risk of overspending on a disparate set of tools, creating data silos resulting in them being no better than when they started their compliance process.\n\n## GitLab makes compliance easy\n\nBeing a single application where developers, security and operations professionals congregate, GitLab is well positioned to automate your compliance processes to answer questions that may arise from your auditors or leadership teams.\n\n1. With [granular user roles and permissions](https://docs.gitlab.com/ee/user/permissions.html), GitLab allows you to enforce segregation of duties. You can easily define your organization’s policies regarding credentials, security scanning, and rules for approvers. Granular permission control also allows you to enforce approvers for determining what goes into production\n1. With [application security](/solutions/security-compliance/) being part of the pipeline, GitLab helps you to automate your information security compliance requirements\n1. GitLab helps you define [custom projects](https://docs.gitlab.com/ee/user/project/working_with_projects.html#project-templates) (such as HIPAA, SOX etc) to track adherence to various different compliance frameworks in a single place. Within the projects, GitLab issues and merge requests are also the central places to collaborate, maintain documents, track chain of custody and overrides, without maintaining these on disparate tools. 
Additionally, you can define a common set of policies to be applied to a set of projects labeled with a specific compliance framework (such as HIPAA, SOX etc)\n1. You can meet the traceability requirements for audits - such as user actions, permission changes, approval changes, logins, password changes and so on via [Audit Events](https://docs.gitlab.com/ee/administration/audit_events.html)\n1. GitLab also provides a consolidated view of various compliance signals such as merge request approvals in the [compliance dashboard](https://docs.gitlab.com/ee/user/compliance/compliance_report/index.html). Going forward, this compliance dashboard aims to provide compliance insights in a consolidated view with all relevant signals such as segregation of duties, framework compliance, license compliance, pipeline and MR results. The compliance dashboard will continue to evolve to include more data to save compliance professionals time when managing their GitLab compliance posture.\n\nLearn more about our Compliance Solution [here](/solutions/compliance/).\n\n## What’s next\n\nOur [vision for Compliance Management](/direction/dev/#manage-1) is strong. 
Watch Matt Gonzales, Senior Product Manager for the compliance group, talk about our vision.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/XFilPpXwVzs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nConsider joining the [Compliance Special Interest Group](https://gitlab.com/gitlab-org/ux-research/-/issues/532) to help shape our direction for compliance management within GitLab.\n\n*Read more about compliance and GitLab:*\n\n[Compliance-as-code explained](/blog/get-started-compliance-as-code/)\n\n[How we chose our compliance framework](/blog/choosing-a-compliance-framework/)\n\n[Tracking agreements in GitLab just got easier](/blog/make-tracking-agreements-simple-compliance-dashboard/)\n\nCover image by joaosilas on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,916],{"slug":2027,"featured":6,"template":686},"compliance-made-easy","content:en-us:blog:compliance-made-easy.yml","Compliance Made Easy","en-us/blog/compliance-made-easy.yml","en-us/blog/compliance-made-easy",{"_path":2033,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2034,"content":2039,"config":2044,"_id":2046,"_type":14,"title":2047,"_source":16,"_file":2048,"_stem":2049,"_extension":19},"/en-us/blog/concurrent-devops",{"title":2035,"description":2036,"ogTitle":2035,"ogDescription":2036,"noIndex":6,"ogImage":1604,"ogUrl":2037,"ogSiteName":670,"ogType":671,"canonicalUrls":2037,"schema":2038},"Making the case for \"concurrent DevOps\"","DevOps goes by a lot of different names, but we’ve settled on concurrent DevOps for now at least.","https://about.gitlab.com/blog/concurrent-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Making the case for \"concurrent DevOps\"\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        
\"datePublished\": \"2019-07-17\",\n      }",{"title":2035,"description":2036,"authors":2040,"heroImage":1604,"date":2041,"body":2042,"category":679,"tags":2043},[851],"2019-07-17","\nWhat’s in a name? Quite a lot, apparently, when it comes to the software development space. Over the last few years companies have come up with a number of different names to describe their DevOps efforts – BizDevOps, DevSecOps, and even “modern software development.” But here at GitLab we prefer the term “[concurrent DevOps](/topics/devops/).”\n\nTo explain the thought process behind our choice of concurrent DevOps and what it all might mean moving forward, GitLab CEO [Sid Sijbrandij](/company/team/#sytses) sat down with chief marketing officer [Todd Barr](/company/team/#tbarr) and corporate marketing senior director [Melissa Smolensky](/company/team/#melsmo). It’s safe to say [a healthy discussion ensued](https://www.youtube.com/watch?v=bDTYHGEIeM0).\n\n## Why “concurrent”?\n\n“In GitLab you’re not passing (code) along multiple stages,” explains Sid. “You don’t wait until something is ready and then send it off to some security testing. People can work in parallel. We call it concurrent because it can be parallel but it doesn't have to be.\"\n\nAnd concurrent DevOps stands out from what Sid calls “sequential DevOps.” Because no one is waiting for a handoff, or permission, everything goes faster, Sid offers. “I think concurrent DevOps could be a rallying cry,” he says. “If we can spread that idea, make it bigger than GitLab, it’s going to be easier for people to demand something like that and trust (us) with other solutions.”\n\n## Start with a mission (statement)\n\nBut Todd needs convincing that concurrent DevOps is the right term. “Concurrent DevOps isn’t really a category, it’s a benefit statement,” he says. He suggests a different approach, using our mission statement [“everyone can contribute”](/company/mission/#mission) as a starting point. 
“I think that has a lot of legs if we actually put more thought into what that means and what category that would mean if we’re creating a platform where everyone can contribute.”\n\n> Concurrent DevOps could be a rallying cry if we can spread that idea – make it bigger than GitLab\n\nSid agrees, in theory, that GitLab is creating a broader platform but doesn’t think the time is right, yet, to make that our main marketing message. “Yes, our visions are bigger. But if you’re too far ahead of where people think you are, you might fall flat on your face. If we can own DevOps I’d settle for that for the next few years.” Melissa agrees, pointing to the fact that enterprises still have a long way to go to integrate DevOps into their development lifecycles.\n\n## Size matters\n\nAnd there’s no question the DevOps market is sufficiently large to support GitLab’s growth, Sid says, referring to a report from Grand View Research that forecasts the market will be worth [nearly $13 billion in 2025](https://www.grandviewresearch.com/press-release/global-development-to-operations-devops-market). So the market opportunity is there, Todd agrees, and offers that both he and Melissa have been in the DevOps space so long they’ve sort of taken it for granted, which is why he suggested different terminology. “DevOps has become a term that's almost synonymous with future software lifecycle development,” he says. “But there's a people element that we've got to help people understand. With concurrent DevOps we're trying to be more inclusive in the process, or that's at least one benefit.”\n\nWe need to make the case that concurrent DevOps is better, Sid stresses, even if we eventually change the name later on. 
“Our big benefit is a single application for the entire DevOps lifecycle.”\n\n Watch the entire video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/bDTYHGEIeM0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nPhoto by [YIFEI CHEN](https://unsplash.com/photos/FPMRxKd7MxI?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/spiral-lights?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,728],{"slug":2045,"featured":6,"template":686},"concurrent-devops","content:en-us:blog:concurrent-devops.yml","Concurrent Devops","en-us/blog/concurrent-devops.yml","en-us/blog/concurrent-devops",{"_path":2051,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2052,"content":2058,"config":2063,"_id":2065,"_type":14,"title":2066,"_source":16,"_file":2067,"_stem":2068,"_extension":19},"/en-us/blog/congratulations-to-hashicorp",{"title":2053,"description":2054,"ogTitle":2053,"ogDescription":2054,"noIndex":6,"ogImage":2055,"ogUrl":2056,"ogSiteName":670,"ogType":671,"canonicalUrls":2056,"schema":2057},"Congratulations to HashiCorp! Enjoy the cake!","We’re thrilled to see our open source and tech partner HashiCorp join us in the public market. Public companies like HashiCorp, MongoDB, Confluent, and GitLab show that with the right business models, open source can be highly profitable. Here’s a look at HashiCorp’s history, our partnership, and a nod to the future.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663383/Blog/Hero%20Images/tanuki-bg-full.png","https://about.gitlab.com/blog/congratulations-to-hashicorp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Congratulations to HashiCorp! 
Enjoy the cake!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-12-09\",\n      }",{"title":2053,"description":2054,"authors":2059,"heroImage":2055,"date":2060,"body":2061,"category":679,"tags":2062},[745],"2021-12-09","![Cake with message reading \"Congrats on your IPO!\"](https://about.gitlab.com/images/blogimages/hashicorp-cake.jpg)\n\nFrom one open source DevOps company to another, we want to congratulate HashiCorp on becoming a public company! In celebration of your debut in the public markets we sent you a cake!\n\nGitLab and HashiCorp have been partners since 2019. And not only do we work well together, we have a lot in common: both companies have strong business models that are open-core providing both open-source and proprietary features for DevOps practitioners enabling them to create safer software faster. We may solve very different problems in the DevOps ecosystem, but that’s what makes us great partners.\n\n## HashiCorp: always ahead of the curve\n\nAlthough co-founder Mitchell Hashimoto didn’t officially incorporate HashiCorp (named after him) until 2012, he was making contributions to the open source community [from the time he was a teen](https://thenewstack.io/new-stack-makers-mitchell-hashimoto-vagrant-containers-growing-open-source/). And it’s clear Mitchell and team approached the business with a fresh perspective: HashiCorp’s Vagrant was the first automated provisioning of developer environments, which was very useful for onboarding and demos. When Docker became more popular, Vagrant added a Docker provider, making it more usable, even with Docker and Docker Compose later around.\n\nThe team made another bold move in 2014, rolling out the HashiCorp configuration language (HCL) as an alternative to YAML. 
The step got developers talking and taking sides, but also thinking about what might work best.\n\nAll of those efforts led to perhaps the most ground-breaking part of HashiCorp’s strategy: Vault. The company’s solution that safely stores and controls access to tokens, secrets, API keys, and more, is not just successful, it’s revolutionary. HashiCorp has turned the idea of secrets keeping on its head, by not just allowing companies to store secrets away, but to also have to renew them regularly, kind of like changing the locks on your door on a regular schedule, rather than giving out lots of keys. Clearly this is a paradigm shift for security.\n\n## HashiCorp and GitLab together\n\nVault’s a breakthrough technology for HashiCorp (don’t forget you can use GitLab with Vault to set up [GitLab OpenID connect for authentication](https://docs.gitlab.com/ee/integration/vault.html) or access your [secrets securely in CI](https://docs.gitlab.com/ee/ci/secrets/) as variables) but it’s just one of many that we integrate with.\n\n### Terraform and the GitLab DevOps Platform\n\n[Terraform](https://www.terraform.io) plays a critical role in GitLab’s GitOps/Infrastructure as Code (IaC) workflows, lowering the barriers to entry for teams to adopt Terraform while enabling them to use more stages on the DevOps platform. GitLab’s Terraform integration allows teams to manage the Terraform state in GitLab without external configuration backends.\n\nWe have created the [GitLab Terraform Provider](https://docs.gitlab.com/ee/user/infrastructure/iac/#the-gitlab-terraform-provider) to manage resources on your GitLab instance like groups, projects, users, and more to improve productivity by eliminating an engineer’s dependence on provisioning requests.\n\nA merge request is the center of all collaboration on the DevOps platform. 
It is important to verify how changes will affect your infrastructure, taking advantage of the [Terraform integration with merge requests](https://docs.gitlab.com/ee/user/infrastructure/iac/mr_integration.html). You can see the planned changes to your infrastructure without leaving the scope of a merge request review at the same time.\n\nUsing GitLab as a [Terraform Module Registry](https://docs.gitlab.com/ee/user/packages/terraform_module_registry/) allows you to publish and reference private Terraform modules in your project’s infrastructure registry, ensuring it’s possible to reuse modules across projects securely. Along with our [IaC security scanning](https://about.gitlab.com/releases/2021/11/22/gitlab-14-5-released/#introducing-infrastructure-as-code-iac-security-scanning) as part of the verify stage, you can safely maintain your infrastructure with ease.\n\n### Terraform and GitLab.com\n\nTerraform is used to manage all the environments of [GitLab.com’s infrastructure](https://gitlab.com/gitlab-com/gitlab-com-infrastructure/) in a single project, allowing collaboration across the entire engineering organization. It is also playing a critical role in our ongoing [migration to Kubernetes](https://about.gitlab.com/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/). Want to deploy a stateful application quickly? GitLab’s [five-minute production app](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template) template leverages the power of Terraform to get you from idea to production in minutes.\n\n## We’re all-remote\n\nHashiCorp is a remote-first, distributed organization and publicly shares [proven principles](https://works.hashicorp.com) for everyone to learn. 
GitLab shares this passion for expanding access to opportunity, bolstering global communities, and building more inclusive workplaces where everyone can contribute.\n\n## We see the promise of the future\n\nThe successes of companies like GitLab and HashiCorp, as well as MongoDB and Confluent, on the open market show providing a free tier and commercial offering can be a highly profitable business model for open source technologies and we believe the DevOps market potential is just starting to be tapped.\n\nIn the [words of Dave Bullock](https://about.gitlab.com/blog/wag-labs-blog-post/), former Director of engineering at Wag!: “_We use GitLab with Terraform to test, review, save, and deploy all of our infrastructure as well as the application…The original idea was to just use GitLab as our CI platform. But as we built that out, we started using it for more and more tasks, and ended up using it for our full CI/CD pipeline._”  This is an example of the power of the DevOps Platform. GitLab’s partnership with HashiCorp has made it easier for customers to use more stages of the DevOps Platform. 
\n\nWe’re joining the global chorus in wishing HashiCorp the best of luck with its public offering.",[9,682,231],{"slug":2064,"featured":6,"template":686},"congratulations-to-hashicorp","content:en-us:blog:congratulations-to-hashicorp.yml","Congratulations To Hashicorp","en-us/blog/congratulations-to-hashicorp.yml","en-us/blog/congratulations-to-hashicorp",{"_path":2070,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2071,"content":2077,"config":2084,"_id":2086,"_type":14,"title":2087,"_source":16,"_file":2088,"_stem":2089,"_extension":19},"/en-us/blog/connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows",{"title":2072,"description":2073,"ogTitle":2072,"ogDescription":2073,"noIndex":6,"ogImage":2074,"ogUrl":2075,"ogSiteName":670,"ogType":671,"canonicalUrls":2075,"schema":2076},"Streamlining Drupal and WordPress with GitLab and Pantheon","Our guest author, a Developer Programs Engineer at Pantheon, shares how to automate WordPress deployments using GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680516/Blog/Hero%20Images/gitlab-pantheon.png","https://about.gitlab.com/blog/connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to connect GitLab and Pantheon to streamline Drupal and WordPress workflows\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Taylor\"}],\n        \"datePublished\": \"2019-03-26\",\n      }",{"title":2078,"description":2073,"authors":2079,"heroImage":2074,"date":2081,"body":2082,"category":791,"tags":2083},"How to connect GitLab and Pantheon to streamline Drupal and WordPress workflows",[2080],"Andrew Taylor","2019-03-26","\n\nAs a member of the developer relations team at [Pantheon](https://pantheon.io), I’m always looking for new ways to help WordPress and Drupal developers solve workflow problems with automation. 
To this end, I love exploring new tools and how they can be used effectively together.\n\n### One frequent problem I see teams facing is the dreaded single staging server.\n\nIt’s not fun to wait in line for your turn to use the staging server or to send clients a URL and tell them to review some work but ignore other, incomplete pieces.\n\n[Multidev environments](https://pantheon.io/docs/multidev/), one of Pantheon’s advanced developer tools, solves this issue by allowing environments matching Git branches to be created on demand. Each multidev environment has its own URL and database, making independent work, QA, and approval possible without developers stepping on each other's toes.\n\nHowever, Pantheon doesn’t provide source control management (SCM) or continuous integration and continuous deployment (CI/CD) tooling. Instead, the platform is flexible enough to be integrated with your preferred tools.\n\n### The next problem I see consistently is teams using different tools to manage development work and to build and deploy that work.\n\nFor example, using one tool for SCM and something else for CI/CD. Having to jump between tools to edit code and diagnose failing jobs is cumbersome.\n\n[GitLab](/) solves this problem by providing a full suite of development workflow tools, such as SCM, with features like issues and merge requests, best-in-class CI/CD, and a container registry, to name a few. I haven't come across another application that is so complete to manage development workflow.\n\nAs someone who loves automation, I explored connecting Pantheon to GitLab so that commits to the master branch on GitLab deploy to the main dev environment on Pantheon. 
Additionally, merge requests on GitLab can create and deploy code to Pantheon multidev environments.\n\nThis tutorial will walk you through setting up the connection between GitLab and Pantheon so you, too, can streamline your WordPress and Drupal workflow.\n\nThis can be done with [GitLab repository mirroring](https://docs.gitlab.com/ee/user/project/repository/repository_mirroring.html), but we will be setting it up manually to get some experience with [GitLab CI](https://docs.gitlab.com/ee/ci/) and have the ability to expand beyond just deployment in the future.\n\n## Background\n\nFor this post, you need to know that Pantheon breaks each site down into three components: code, database, and files.\n\nThe code portion of a Pantheon site includes the CMS files, such as WordPress core, plugins and themes. These files are managed in a [Git repository](https://git-scm.com/book/en/v2/Git-Basics-Getting-a-Git-Repository) hosted by Pantheon, which means we can deploy code from GitLab to Pantheon with Git.\n\nWhen Pantheon refers to files, it is the media files, such as images, for your site. These are typically uploaded by site users and are ignored in Git.\n\nYou can [create a free account](https://pantheon.io/register), learn more about the [Pantheon workflow](https://pantheon.io/docs/pantheon-workflow), or [sign up for a live demo](https://pantheon.io/live-demo) on pantheon.io.\n\n## Assumptions\n\nMy project is named `pantheon-gitlab-blog-demo`, both on Pantheon and GitLab. You should use a unique project name. This tutorial uses a WordPress site. 
Drupal can be substituted, but some modification will be needed.\n\nI'll also be using the [Git command line](https://git-scm.com/book/en/v2/Getting-Started-The-Command-Line) but you can substitute a [graphical interface](https://git-scm.com/book/en/v2/Appendix-A%3A-Git-in-Other-Environments-Graphical-Interfaces) if you prefer.\n\n## Create the projects\n\nFirst up, create a [new GitLab project](https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-project) – we'll come back to this in a little bit.\n\nNow, [create a new WordPress site on Pantheon](https://pantheon.io/docs/launch-wordpress/). After your new site is created, you will need to install WordPress for the site dashboard.\n\n_You might be tempted to make some changes, such as adding or removing plugins, but please refrain. We haven't connected the site to GitLab yet and want to make sure all code changes, e.g. adding or removing plugins, go through GitLab._\n\nAfter WordPress is installed, go back to the Pantheon site dashboard and change the development mode to Git.\n\n![Pantheon Dashboard](https://about.gitlab.com/images/blogimages/pantheon-dashboard-after-fresh-wordpress-install.png){: .shadow.medium.center}\n\n## Initial commit to GitLab\n\nNext, we need to get the starting WordPress code from the Pantheon site over to GitLab. In order to do this, we will clone the code from the Pantheon site Git repository locally, then push it to the GitLab repository.\n\nTo make this easier, and more secure, [add an SSH key to Pantheon](https://pantheon.io/docs/ssh-keys/) to avoid entering your password when cloning Pantheon Git repository. 
While you're at it, [add an SSH key to GitLab](https://docs.gitlab.com/ee/ssh/) as well.\n\nTo do this, clone the Pantheon site locally by copying the command in the Clone with Git drop-down field from the site dashboard.\n\n![CPantheon git connection](https://about.gitlab.com/images/blogimages/pantheon-git-connection-info.png){: .shadow.center}\n\n_If you need help, see the [Pantheon Start With Git](https://pantheon.io/docs/git/#clone-your-site-codebase) documentation._\n\nNext, we want to change the `git remote origin` to point to GitLab, instead of Pantheon. This can be done with the [`git remote` command](https://git-scm.com/docs/git-remote).\n\nHead over to your GitLab project and grab the repository URL, which can be found at in the Clone drop-down of the project details screen. Be sure to use the Clone with SSH variant of the GitLab repository URL, since we set up an SSH key earlier.\n\n![Gitlab git connection](https://about.gitlab.com/images/blogimages/gitlab-git-connection-info.png){: .shadow.medium.center}\n\nThe default `git remote` for the local copy of our code repository is `origin`. We can change it with `git remote set-url origin [GitLab repository URL]`, replacing `[GitLab repository URL]` with your actual GitLab repository URL.\n\nFinally, run `git push origin master --force` to send the WordPress code from the Pantheon site to GitLab.\n\n_The --force flag is only needed as part of this one-time step. Subsequent `git push` commands to GitLab won't need it._\n\n## Set up credentials and variables\n\nRemember how we added an SSH key locally to authorize with Pantheon and GitLab? 
Well, an SSH token can also be used to authorize GitLab and Pantheon.\n\nGitLab has some great documentation, and we will be looking at the [SSH keys when using the Docker executor section of the Using SSH keys with GitLab CI/CD doc](https://docs.gitlab.com/ee/ci/ssh_keys/#ssh-keys-when-using-the-docker-executor).\n\nAt this point, we will need to do the first two steps: _Create a new SSH key pair locally with ssh-keygen and Add the private key as a variable to your project._\n\nWhen done, `SSH_PRIVATE_KEY` should be set as a [GitLab CI/CD Environment Variables](https://docs.gitlab.com/ee/ci/variables/) in the project settings.\n\nTo take care of the third and fourth steps, create `.gitlab-ci.yml` file with the following contents:\n\n```\nbefore_script:\n  # See https://docs.gitlab.com/ee/ci/ssh_keys/\n  - eval $(ssh-agent -s)\n  - echo \"$SSH_PRIVATE_KEY\" | tr -d '\\r' | ssh-add - > /dev/null\n  - mkdir -p $HOME/.ssh && echo \"StrictHostKeyChecking no\" >> \"$HOME/.ssh/config\"\n  - git config --global user.email \"$GITLAB_USER_EMAIL\"\n  - git config --global user.name \"Gitlab CI\"\n  ```\n\nDon't commit the `.gitlab-ci.yml` file just yet, we will be adding more to it in the next section.\n\nNow, we need to take care of step 5, _add the public key from the one you created in the first step to the services that you want to have an access to from within the build environment._\n\nIn our case, the service we want to access from GitLab is Pantheon. Follow the Pantheon doc to [Add Your SSH Key to Pantheon](https://pantheon.io/docs/ssh-keys/#add-your-ssh-key-to-pantheon) to complete this step.\n\n_Be sure that the private SSH key is in GitLab and the public key is on Pantheon_\n\nWe will also need to set some additional environment variables. The first one should be named PANTHEON_SITE, and the value will be the machine name of your `Pantheon site`. 
and the value will be the *machine name* of your Pantheon site.\n\nYou can get the machine name from the end of the Clone with Git command. Since you already cloned the site locally, it will be the directory name of your local repository.\n\n![wordpress machine name](https://about.gitlab.com/images/blogimages/pantheon-machine-name.png){: .shadow.medium.center}\n\nThe next GitLab CI environment variable to set is `PANTHEON_GIT_URL`, which will be the Git repository URL of the Pantheon site that we used earlier.\n\n_Enter just the SSH repository URL, leaving off `git clone` and the site machine name at the end._\n\nPhew! Now that setup is done, we can move on to finishing our `.gitlab-ci.yml` file.\n\n## Create the deployment job\n\nWhat we will be doing with GitLab CI initially is very similar to what we did with Git repositories earlier. This time though, we will add the Pantheon repository as a second Git remote and then push the code from GitLab to Pantheon.\n\nTo do this, we will set up a [stage](https://docs.gitlab.com/ee/ci/yaml/#stages) named `deploy` and a [job](https://docs.gitlab.com/ee/ci/jobs/) named `deploy:dev`, as it will deploy to the dev environment on Pantheon. 
The resulting `.gitlab-ci.yml` file should look like this:\n\n```\nstages:\n- deploy\n\nbefore_script:\n  # See https://docs.gitlab.com/ee/ci/ssh_keys/\n  - eval $(ssh-agent -s)\n  - echo \"$SSH_PRIVATE_KEY\" | tr -d '\\r' | ssh-add - > /dev/null\n  - mkdir -p $HOME/.ssh && echo \"StrictHostKeyChecking no\" >> \"$HOME/.ssh/config\"\n  - git config --global user.email \"$GITLAB_USER_EMAIL\"\n  - git config --global user.name \"Gitlab CI\"\n\ndeploy:dev:\n  stage: deploy\n  environment:\n    name: dev\n    url: https://dev-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    - git remote add pantheon $PANTHEON_GIT_URL\n    - git push pantheon master --force\n  only:\n    - master\n```\n\n`SSH_PRIVATE_KEY`, `PANTHEON_SITE`, and `PANTHEON_GIT_URL` should all look familiar - they are the environment variables we set up earlier. Having environment variables will allow us to re-use the values multiple times in our `.gitlab-ci.yml` file, while having one place to update them, should they change in the future.\n\nFinally, add, commit, and push the `.gitlab-ci.yml` file to send it to GitLab.\n\n## Verify the deployment\n\nIf everything was done correctly, the `deploy:dev` job run on GitLab CI/CD, succeed and send the `.gitlab-ci.yml` commit to Pantheon. 
Let's take a look!\n\n![deploy job](https://about.gitlab.com/images/blogimages/gitlab-deploy-dev-job.png){: .shadow.center}\n\n![deploy job passing](https://about.gitlab.com/images/blogimages/gitlab-deploy-dev-job-passed.png){: .shadow.center}\n\n![gitlab commit on pantheon dev](https://about.gitlab.com/images/blogimages/gitlab-commits-on-pantheon-dev.png){: .shadow.center}\n\n## Sending merge request branches to Pantheon\n\nThis next section makes use of my favorite Pantheon feature, [multidev](https://pantheon.io/docs/multidev), which allows you to create additional Pantheon environments on demand associated with Git branches.\n\nThis section is entirely optional as [multidev access is restricted](https://pantheon.io/docs/multidev-faq/), however, if you do have multidev access, having GitLab merge requests automatically create multidev environments on Pantheon is a huge workflow improvement.\n\nWe will start by making a new Git branch locally with `git checkout -b multidev-support`. Now, let's edit `.gitlab-ci.yml` again.\n\nI like to use the merge request number in the Pantheon environment name. For example, the first merge request would be `mr-1`, the second would be `mr-2`, and so on.\n\nSince the merge request changes, we need to define these Pantheon branch names dynamically. GitLab makes this easy by providing [predefined environment](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) variables.\n\nWe can use `$CI_MERGE_REQUEST_IID`, which provides the merge request number. 
Let's put that to use, along with our global environment variables from earlier, and add a new deploy:multidev job to the end of our `.gitlab-ci.yml` file.\n\n```\ndeploy:multidev:\n  stage: deploy\n  environment:\n    name: multidev/mr-$CI_MERGE_REQUEST_IID\n    url: https://mr-$CI_MERGE_REQUEST_IID-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    # Checkout the merge request source branch\n    - git checkout $CI_COMMIT_REF_NAME\n    # Add the Pantheon git repository as an additional remote\n    - git remote add pantheon $PANTHEON_GIT_URL\n    # Push the merge request source branch to Pantheon\n    - git push pantheon $CI_COMMIT_REF_NAME:mr-$CI_MERGE_REQUEST_IID --force\n  only:\n    - merge_requests\n```\n\nThis should look very similar to our `deploy:dev` job, only pushing a branch to Pantheon instead of `master`.\n\nAfter you add and commit the updated `.gitlab-ci.yml` file, push this new branch to GitLab with `git push -u origin multidev-support`.\n\nNext, let's create a new merge request from our `multidev-support` branch by following the _Create merge request_ prompt.\n\n![create merge request](https://about.gitlab.com/images/blogimages/gitlab-create-merge-request-prompt.png){: .shadow.medium.center}\n\nAfter creating the merge request, look for the  CI/CD job `deploy:multidev` to run.\n\n![multidev deploy success](https://about.gitlab.com/images/blogimages/multidev-branch-deploy-success.png){: .shadow.medium.center}\n\nLook at that – a new branch was sent to Pantheon. However, when we go to the multidev section of the site dashboard on Pantheon there isn't a new multidev environment.\n\n![multidev branch](https://about.gitlab.com/images/blogimages/pantheon-no-multidev-environments.png){: .shadow.medium.center}\n\nLet's look at the _Git_ Branches section.\n\n![mr branch](https://about.gitlab.com/images/blogimages/pantheon-mr-1-branch.png){: .shadow.medium.center}\n\nOur `mr-1` branch did make it to Pantheon after all. 
Go ahead and create an environment from the `mr-1` branch.\n\n![create multidev](https://about.gitlab.com/images/blogimages/pantheon-mr-1-multidev-creation.png){: .shadow.medium.center}\n\nOnce the multidev environment has been created, head back to GitLab and look at the _Operations > Environments_ section. You will notice entries for `dev` and `mr-1`.\n\nThis is because we added an `environment` entry with `name` and `url` to our CI/CD jobs. If you click on the open environment icon, you will be taken to the URL for the multidev on Pantheon.\n\n## Automating multidev creation\n\nWe _could_ stop here and try to remember to create a multidev environment each time there is a new merge request, but we can automate that process as well!\n\nPantheon has a command line tool, [Terminus](https://pantheon.io/docs/terminus/), that allows you to interact with the platform in an automated fashion. Terminus will allow us to provision our multidev environments from the command line – perfect for use in [GitLab CI](https://docs.gitlab.com/ee/ci/).\n\nWe will need a new merge request to test this, so let's create a new branch with `git checkout -b auto-multidev-creation`.\n\nIn order to use Terminus in GitLab CI/CD jobs we will need a machine token to authenticate with Terminus and a container image with Terminus available.\n\n[Create a Pantheon machine token](https://pantheon.io/docs/machine-tokens/#create-a-machine-token), save it to a safe place, and add it as a global GitLab environment variable named `PANTHEON_MACHINE_TOKEN`.\n\n_If you don't remember how to add GitLab environment variables, scroll up to where we defined `PANTHEON_SITE` earlier in the tutorial._\n\n## Building a Dockerfile with Terminus\n\nIf you don't have Docker or aren't comfortable working with `Dockerfile` files, you can use my image `registry.gitlab.com/ataylorme/pantheon-gitlab-blog-demo:latest` and skip this section.\n\n[GitLab has a container 
registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html) that allows us to build and host a Dockerfile for use in our project. Let's create a Dockerfile that has Terminus available, so we can interact with Pantheon.\n\nTerminus is a PHP-based command line tool, so we will start with a PHP image. I prefer to install Terminus via Composer so I'll be using [the official Docker Composer image](https://hub.docker.com/_/composer) as a base. Create a `Dockerfile` in your local repository directory with the following contents:\n\n```\n# Use the official Composer image as a parent image\nFROM composer:1.8\n\n# Update/upgrade apk\nRUN apk update\nRUN apk upgrade\n\n# Make the Terminus directory\nRUN mkdir -p /usr/local/share/terminus\n\n# Install Terminus 2.x with Composer\nRUN /usr/bin/env COMPOSER_BIN_DIR=/usr/local/bin composer -n --working-dir=/usr/local/share/terminus require pantheon-systems/terminus:\"^2\"\n```\nFollow the _Build and push images_ section of the [container registry documentation](https://gitlab.com/help/user/project/container_registry#build-and-push-images) to build an image from the `Dockerfile` and upload it to GitLab.\n\nVisit the _Registry_ section of your GitLab project. If things went according to plan you will see your image listed. Make a note of the image tag link, as we will need to use that in our `.gitlab-ci.yml` file.\n\n![container registry](https://about.gitlab.com/images/blogimages/gitlab-container-registry.png){: .shadow.center}\n\nThe `script` section of our `deploy:multidev` job is starting to get long, so let's move it to a dedicated file. 
Create a new file `private/multidev-deploy.sh` with the following contents:\n\n```\n#!/bin/bash\n\n# Store the mr- environment name\nexport PANTHEON_ENV=mr-$CI_MERGE_REQUEST_IID\n\n# Authenticate with Terminus\nterminus auth:login --machine-token=$PANTHEON_MACHINE_TOKEN\n\n# Checkout the merge request source branch\ngit checkout $CI_COMMIT_REF_NAME\n\n# Add the Pantheon Git repository as an additional remote\ngit remote add pantheon $PANTHEON_GIT_URL\n\n# Push the merge request source branch to Pantheon\ngit push pantheon $CI_COMMIT_REF_NAME:$PANTHEON_ENV --force\n\n# Create a function for determining if a multidev exists\nTERMINUS_DOES_MULTIDEV_EXIST()\n{\n    # Stash a list of Pantheon multidev environments\n    PANTHEON_MULTIDEV_LIST=\"$(terminus multidev:list ${PANTHEON_SITE} --format=list --field=id)\"\n\n    while read -r multiDev; do\n        if [[ \"${multiDev}\" == \"$1\" ]]\n        then\n            return 0;\n        fi\n    done \u003C\u003C\u003C \"$PANTHEON_MULTIDEV_LIST\"\n\n    return 1;\n}\n\n# If the multidev doesn't exist\nif ! TERMINUS_DOES_MULTIDEV_EXIST $PANTHEON_ENV\nthen\n    # Create it with Terminus\n    echo \"No multidev for $PANTHEON_ENV found, creating one...\"\n    terminus multidev:create $PANTHEON_SITE.dev $PANTHEON_ENV\nelse\n    echo \"The multidev $PANTHEON_ENV already exists, skipping creating it...\"\nfi\n```\n\nThe script is in the `private` directory as [it is not web accessible on Pantheon](https://pantheon.io/docs/private-paths/). 
Now that we have a script for our multidev logic, update the `deploy:multidev` section of `.gitlab-ci.yml` so that it looks like this:\n\n```\ndeploy:multidev:\n  stage: deploy\n  environment:\n    name: multidev/mr-$CI_MERGE_REQUEST_IID\n    url: https://mr-$CI_MERGE_REQUEST_IID-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    # Run the multidev deploy script\n    - \"/bin/bash ./private/multidev-deploy.sh\"\n  only:\n    - merge_requests\n```\n\nIn order to make sure our jobs run with the custom image created earlier, add an `image` definition with the registry URL to `.gitlab-ci.yml`. My complete `.gitlab-ci.yml` file now looks like this:\n\n```\nimage: registry.gitlab.com/ataylorme/pantheon-gitlab-blog-demo:latest\n\nstages:\n- deploy\n\nbefore_script:\n  # See https://docs.gitlab.com/ee/ci/ssh_keys/\n  - eval $(ssh-agent -s)\n  - echo \"$SSH_PRIVATE_KEY\" | tr -d '\\r' | ssh-add - > /dev/null\n  - mkdir -p $HOME/.ssh && echo \"StrictHostKeyChecking no\" >> \"$HOME/.ssh/config\"\n  - git config --global user.email \"$GITLAB_USER_EMAIL\"\n  - git config --global user.name \"Gitlab CI\"\n\ndeploy:dev:\n  stage: deploy\n  environment:\n    name: dev\n    url: https://dev-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    - git remote add pantheon $PANTHEON_GIT_URL\n    - git push pantheon master --force\n  only:\n    - master\n\ndeploy:multidev:\n  stage: deploy\n  environment:\n    name: multidev/mr-$CI_MERGE_REQUEST_IID\n    url: https://mr-$CI_MERGE_REQUEST_IID-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    # Run the multidev deploy script\n    - \"/bin/bash ./private/multidev-deploy.sh\"\n  only:\n    - merge_requests\n```\n\nAdd, commit, and push `private/multidev-deploy.sh` and `.gitlab-ci.yml`. Now, head back to GitLab and wait for the CI/CD job to finish. The multidev creation takes a few minutes, so be patient.\n\nWhen it is finished, go check out the multidev list on Pantheon. Voila! 
The `mr-2` multidev is there.\n\n![mr-2](https://about.gitlab.com/images/blogimages/pantheon-mr-2-multidev.png){: .shadow.medium.center}\n\n## Conclusion\n\nOpening a merge request and having an environment spin up automatically is a powerful addition to any team's workflow.\n\nBy leveraging the powerful tools offered by both GitLab and Pantheon, we can connect GitLab to Pantheon in an automated fashion.\n\nSince we used GitLab CI/CD, there is room for growth in our workflow as well. Here are a few ideas to get you started:\n* Add a build step.\n* Add automated testing.\n* Add a job to enforce coding standards.\n* Add [dynamic application security testing](https://docs.gitlab.com/ee/user/application_security/dast/).\n\nDrop me a line with any thoughts you have on GitLab, Pantheon, and automation.\n\nP.S. Did you know Terminus, Pantheon’s command line tool, [is extendable via plugins](https://pantheon.io/docs/terminus/plugins/)?\n\nOver at Pantheon, we have been hard at work on version 2 of our [Terminus Build Tools Plugin](https://github.com/pantheon-systems/terminus-build-tools-plugin/), complete with GitLab support. If you don't want to do all this setup for each project, I encourage you to check it out and help us test the v2 beta. The terminus `build:project:create` command just needs a Pantheon token and GitLab token. 
From there, it will spin up one of our example projects, complete with Composer and automated testing, create a new project on GitLab, a new site on Pantheon, and connect the two by setting up environment variables and SSH keys.\n\n### About the guest author\n\nAndrew Taylor is a Developer Programs Engineer at [Pantheon](https://pantheon.io/).\n",[9,231,267,683],{"slug":2085,"featured":6,"template":686},"connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows","content:en-us:blog:connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows.yml","Connecting Gitlab And Pantheon Streamline Wordpress Drupal Workflows","en-us/blog/connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows.yml","en-us/blog/connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows",{"_path":2091,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2092,"content":2098,"config":2104,"_id":2106,"_type":14,"title":2107,"_source":16,"_file":2108,"_stem":2109,"_extension":19},"/en-us/blog/create-vision",{"title":2093,"description":2094,"ogTitle":2093,"ogDescription":2094,"noIndex":6,"ogImage":2095,"ogUrl":2096,"ogSiteName":670,"ogType":671,"canonicalUrls":2096,"schema":2097},"GitLab's 2019 product vision for DevOps Create","Take an early look at where collaboration, merge requests, and the Web IDE are heading in 2019.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678812/Blog/Hero%20Images/web-ide-cover.jpg","https://about.gitlab.com/blog/create-vision","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's 2019 product vision for DevOps Create\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"James Ramsay\"}],\n        \"datePublished\": \"2018-09-21\",\n      }",{"title":2093,"description":2094,"authors":2099,"heroImage":2095,"date":2101,"body":2102,"category":299,"tags":2103},[2100],"James Ramsay","2018-09-21","\nGitLab is a single 
application, so for convenience we organize by [DevOps stages](/handbook/product/categories/). The Create stage of the DevOps lifecycle is about creating code, and includes Git repositories, merge requests, code review, the Web IDE, wikis, and snippets.\n\nManaging source code is at the heart of GitLab – it's in our name and it powers your applications. This year we've shipped many important improvements to make it easier to go from idea to production. The [Web IDE](/releases/2018/06/22/gitlab-11-0-released/#cicd-pipeline-status-and-job-traces-in-the-web-ide) makes it easy for anyone to contribute, and faster to work with merge requests. [Squash and Merge](/releases/2018/06/22/gitlab-11-0-released/#squash-and-merge-in-gitlab-core-and-gitlabcom-free), and [Rebase and Fast-forward Merge](/releases/2018/01/22/gitlab-10-4-released/#rebase-and-fast-forward-in-ce) are available in GitLab CE. [File locking](/releases/2018/02/22/gitlab-10-5-released/#git-lfs-2-locking-support) is integrated with Git LFS. [Maintainers can push to forks](/releases/2018/03/22/gitlab-10-6-released/#maintainers-can-push-to-mr-from-fork). And there is much more to come this year, like [batch comments](https://gitlab.com/gitlab-org/gitlab-ee/issues/1984) for merge requests, and [suggested approvers](https://gitlab.com/gitlab-org/gitlab-ee/issues/5382) based on code owners.\n\nHere are some of the things we're thinking about for 2019:\n\n- [Collaboration](#collaboration)\n- [Code review and approvals](#code-review-and-approvals)\n- [Web IDE](#web-ide)\n- [Summary](#summing-up)\n\nAs our plans are always in draft, we'd love to hear your thoughts, and any suggestions.\n\n### Collaboration\n\nGit's distributed design made new collaborative workflows possible, and forking has made collaboration even easier. Forking is the workflow of choice for open source, and for the same reasons it is also great for private organizations. 
We want to remove the barriers to collaboration and [inner sourcing](/topics/version-control/what-is-innersource/), but also make it easier to collaborate with external open source projects too.\n\nThe distributed capabilities of Git aren't limited to a single server. Open source software is used extensively in commercial applications of all kinds, but collaboration between open source projects and commercial is difficult. Features and bug fixes to open source projects can sit in stale forks in private Git repositories for lack of tools and process. [Distributed merge requests](https://gitlab.com/groups/gitlab-org/-/epics/260) will make it easy to publish a patch from a private GitLab instance to a public upstream server, be it GitLab, GitHub or Bitbucket. Teams will be able to work on a patch privately following internal processes, but instead of merging the reviewed and tested change privately, it can be published to a new public merge request upstream. Contributing fixes and features upstream isn't only good for the community, but it also makes commercial sense by eliminating the costly task of keeping a stale, private fork up to date. We want to make it easy for everyone to contribute to open source software, as individuals and as companies!\n\n![Mockup of distributed merge request widget](https://about.gitlab.com/images/blogimages/merge-request-distributed.png){: .medium.center.shadow}\n\nWe'll also be improving simpler forking workflows too with important quality-of-life improvements. To make it easy to see how far behind or diverged your fork is, we will make it possible to [compare branches](https://gitlab.com/gitlab-org/gitlab-ce/issues/19788) across forks and [cherry pick](https://gitlab.com/gitlab-org/gitlab-ce/issues/43568) changes directly from the upstream project into your fork. 
Forks of private projects will also [inherit permissions](https://gitlab.com/gitlab-org/gitlab-ce/issues/8935) from the upstream project, making it possible for upstream maintainers to rebase stale merge requests and help contributors. This will allow teams to adopt forking workflows without needing to make every project public to the world or to the organization.\n\n### Code review and approvals\n\nMerge requests are key to the workflows that allow teams to iterate rapidly and ship amazing products quickly, by bringing together all the important information in a single place. Critical to this workflow is the code review, and we want GitLab to be the best tool for doing code reviews.\n\nAutomatic code quality and linting tools can prevent code reviews becoming simple code style reviews, but without the inline feedback a reviewer can't be sure which problems have been automatically detected. A new [API for line by line code quality feedback](https://gitlab.com/gitlab-org/gitlab-ce/issues/50299) will allow output from tools to be rendered natively in GitLab in the merge request diff. Merge request authors will have a single source of truth, and code reviewers can confidently focus on important structural feedback.\n\nCode review feedback cannot truly be resolved and the merge request approved until the reviewer checks the feedback was correctly addressed. This step prevents feedback from being misunderstood or overlooked, but it is currently difficult and time consuming. We are going to streamline this important step by allowing you to [review changes since code review](https://gitlab.com/groups/gitlab-org/-/epics/314) and making [merge request diffs smarter](https://gitlab.com/groups/gitlab-org/-/epics/340). 
When the change is straightforward, we're going to make it possible to simply [propose a change](https://gitlab.com/gitlab-org/gitlab-ce/issues/18008) as easily as leaving a comment that can be applied with a single click – no more copying and pasting `sed` one liners! And we're going to make it easier to [view and add comments to commits](https://gitlab.com/gitlab-org/gitlab-ee/issues/1769) at any time.\n\nIn the real world, complex features often require large, complex merge requests. We will support these situations better with [commit by commit code review](https://gitlab.com/groups/gitlab-org/-/epics/285), autosquashing [`fixup!`](https://gitlab.com/gitlab-org/gitlab-ee/issues/212) and [`squash!`](https://gitlab.com/gitlab-org/gitlab-ce/issues/50400) commits, and allowing you to [preview](https://gitlab.com/gitlab-org/gitlab-ee/issues/7259) the resultant squashed commits.\n\nComplex real-world changes also need good commit messages, but commit messages are too easily neglected. Without good commit messages, debugging a regression, or modifying an important existing function is painful and error prone. To help teams adopt best practice [commit hygiene](/blog/keeping-git-commit-history-clean/), we will make [commit messages part of code review](https://gitlab.com/groups/gitlab-org/-/epics/286) by allowing comments on commit messages, improving the [visibility of commit messages](https://gitlab.com/gitlab-org/gitlab-ce/issues/49803), and making [squash and merge smarter](https://gitlab.com/gitlab-org/gitlab-ce/issues/47149). 
GitLab should celebrate great commit messages and amplify their benefits to make it easier for teams to adopt best practices.\n\n### Web IDE\n\nIn 2018 we're building a strong foundation for a cloud development environment with [client side evaluation](https://gitlab.com/gitlab-org/gitlab-ce/issues/47268) and [server side evaluation](https://gitlab.com/gitlab-org/gitlab-ee/issues/4013) powered live previews, and server side evaluation will also enable a [web terminal](https://gitlab.com/gitlab-org/gitlab-ee/issues/5426) to test your changes in real time. IDEs are also very personal and should support customization, to make it easy to move between your local IDE and GitLab IDE. Please share your feedback, and consider contributing – I'd love to see support for [dark syntax themes](https://gitlab.com/gitlab-org/gitlab-ce/issues/46334) and [vim keybindings](https://gitlab.com/gitlab-org/gitlab-ce/issues/47930)!\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/sSWu6TyubTE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe Web IDE makes it easier than ever to resolve code review feedback, reducing the need to switch context in your local development environment, but we can make it even better. Addressing a comprehensive code review still requires switching backwards and forwards between the merge request and the Web IDE. 
[Line by line code quality feedback](https://gitlab.com/gitlab-org/gitlab-ce/issues/50299) available in the merge request diff will also be available in the Web IDE as will [live linting feedback](https://gitlab.com/groups/gitlab-org/-/epics/70) powered by server side evaluation to help prevent new code styling problems being created while resolving feedback.\n\nWe are also considering integrating [merge request discussions](https://gitlab.com/groups/gitlab-org/-/epics/72) so that code review comments can be addressed without needing to continually switch between tabs. We don't think the Web IDE should replace the merge request, nor should every feature be duplicated into it, but do think the Web IDE can further simplify the process for resolving code review feedback so teams can iterate faster.\n\n### Summing up\n\nWriting, reviewing, and merging code is where the rubber hits the road when taking your app from idea to production, and in 2019 we want it to be better than ever before!\n\nThe [GitLab product vision](/direction/) is public so you can read up on what we're thinking about at any time, about every part of the product. Please join the conversation and share your feedback on these ideas, and offer ideas of your own! 
Your contributions – idea or code – are welcomed and appreciated so that we can all work together to make GitLab the best application to build and ship your next great idea.\n",[728,916,749,683,9],{"slug":2105,"featured":6,"template":686},"create-vision","content:en-us:blog:create-vision.yml","Create Vision","en-us/blog/create-vision.yml","en-us/blog/create-vision",{"_path":2111,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2112,"content":2118,"config":2124,"_id":2126,"_type":14,"title":2127,"_source":16,"_file":2128,"_stem":2129,"_extension":19},"/en-us/blog/cross-project-pipeline",{"title":2113,"description":2114,"ogTitle":2113,"ogDescription":2114,"noIndex":6,"ogImage":2115,"ogUrl":2116,"ogSiteName":670,"ogType":671,"canonicalUrls":2116,"schema":2117},"How to trigger multiple pipelines using GitLab CI/CD","Discover how to trigger and visualize pipelines when you set up GitLab CI/CD across multiple projects.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666903/Blog/Hero%20Images/pipeline.jpg","https://about.gitlab.com/blog/cross-project-pipeline","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to trigger multiple pipelines using GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2019-07-24\",\n      }",{"title":2113,"description":2114,"authors":2119,"heroImage":2115,"date":2121,"body":2122,"category":791,"tags":2123},[2120],"Itzik Gan Baruch","2019-07-24","\n[Continuous integration (CI)](/solutions/continuous-integration/) is the practice of [automating code building and testing](/topics/ci-cd/) before it is\nmerged into the master or default branch. 
This allows developers to merge code early and frequently, while\nmitigating the risk of introducing new bugs into the master source code repository.\n\nWhile CI verifies that new code won't break when integrated with other code in the same repo, having\nall tests pass on that repo is only the first step. After running CI on the code, it is important to\ndeploy and run tests in a live environment. Moving from [CI to continuous delivery and deployment (CD)](/solutions/continuous-integration/)\nis [the next step of DevOps maturity](/topics/devops/). Deploying and then testing again allows code in one project\nto be tested together with other components and services which may be managed in other projects.\n\n## Why do I need to verify that my code works with other components?\n\nA good example could be a\nmicroservices architecture. Usually, different [microservices](/topics/microservices/) are managed in\ndifferent [projects](https://docs.gitlab.com/ee/user/project/) – each microservice has its own\nrepository and own pipeline. It's also very common for different teams to be\nresponsible for different microservices and their pipeline configurations. As a developer you will\nwant to confirm that your code changes don't break functionality of the dependent microservices.\nTherefore, you will want to execute tests on those microservices in addition to your project tests.\n\n## The cross-project pipeline\n\nWhen running your [project pipeline](/topics/ci-cd/cicd-pipeline/), you also want to trigger cross-project or multi-project pipelines,\nwhich will eventually deploy and test the latest version of all dependent microservices. To\nachieve this goal you need an easy, flexible and convenient way to trigger other\npipelines as part of your project CI. 
GitLab CI/CD offers an easy way to run a cross-project\npipeline by simply adding a pipeline trigger job in the CI configuration file.\n\n## GitLab CI/CD configuration file\n\nIn GitLab CI/CD, pipelines, and their component jobs and stages, are defined in\nthe [`.gitlab-ci.yml`](https://docs.gitlab.com/ee/ci/yaml/) file for each project. The\nfile is part of the project repository. It is fully versioned and developers can edit it with any\ncommon IDE of their choice. They do not have to ask the system admin or DevOps team to make\nchanges in the pipeline configuration as it is self-service. The `.gitlab-ci.yml` file defines the structure\nand order of the pipelines and determines what to execute\nusing [GitLab Runner](https://docs.gitlab.com/runner/) (the agent that runs the jobs), and what\ndecisions to make when specific conditions are encountered, like when a process succeeds or fails.\n\n## Add a cross-project pipeline triggering job\n\nSince GitLab 11.8, GitLab provides a new CI/CD configuration syntax for triggering cross-project\npipelines found in the [pipeline configuration file](https://docs.gitlab.com/ee/ci/yaml/).\nThe following code illustrates configuring a bridge job to trigger a downstream pipeline:\n\n```\n# job1 is a job in the upstream project\ndeploy:\n\tstage: Deploy\n\tscript: this is my script\n\n# job2 is a bridge job in the upstream project which triggers cross-project pipeline\nAndroid:\n\tstage: Trigger-cross-projects\n            trigger: mobile/android\n```\n\nIn the example above, as soon as the deploy job succeeds in the deploy stage, the Android\nbridge job is going to be started. The initial status of this job will be pending. GitLab will\ncreate a downstream pipeline in the mobile/android project and, as soon as the pipeline gets created,\nthe Android job will succeed. 
In this case mobile/android is a full path to that project.\n\nThe user who created the upstream pipeline needs to have access rights to the downstream\nproject (mobile/android in this case). If a downstream project cannot be found, or a user does not\nhave access rights to create a pipeline there, the Android job will be marked as failed.\n\n## Browse from upstream pipeline graphs to downstream\n\nGitLab CI/CD makes it possible to visualize the pipeline configuration. In the below illustration, the\nbuild, test, and deploy stages are parts of the upstream project. Once the deploy job succeeds, four\ncross-projects will be triggered in parallel and you will be able to browse to them by clicking on\none of the downstream jobs.\n\n![Build, test and deploy stages](https://about.gitlab.com/images/blogimages/Cross-proj-img1.png){: .shadow.medium.center}\n\nIn the below illustration the Service – Finance downstream pipeline is visible. We can now scroll\nleft to the upstream pipeline, scroll right back to the downstream pipeline or select another\ndownstream pipeline.\n\n![Service – Finance pipeline](https://about.gitlab.com/images/blogimages/Cross-proj-img2.png){: .shadow.medium.center}\n\n## Specifying a downstream pipeline branch\n\nIt is possible to specify a branch name that a downstream pipeline will use:\n\n```\ntrigger:\n     project: mobile/android\n     branch: stable-11-2\n```\n\nUse a project keyword to specify the full path to a downstream project. Use a branch keyword to\nspecify a branch name. GitLab will use a commit that is currently on the HEAD of the branch\nwhen creating a downstream pipeline.\n\n## Passing variables to a downstream pipeline\n\nSometimes you might want to pass variables to a downstream pipeline. 
You can do that using\nthe variables keyword, just like you would when defining a regular job.\n\n```\nAndroid:\n           variables:\n\t     ENVIRONMENT: 'This is the variable value for the downstream pipeline'\n           stage: Trigger-cross-projects\n           trigger: mobile/android\n```\nThe ENVIRONMENT variable will be passed to every job defined in a downstream pipeline. It will be\navailable as an environment variable when GitLab Runner picks a job.\n\n## Cross-project pipeline summary\n\nThe `.gitlab-ci.yml` file defines the order of the CI/CD stages, which jobs to execute, and at which\nconditions to run or skip a job's execution. Adding a 'bridge job' with the `trigger` keyword to\nthis file can be used to trigger cross-project pipelines. We can pass parameters to jobs in\ndownstream pipelines, and even define a branch that a downstream pipeline will use.\n\nPipelines can be complex structures with many sequential and parallel jobs, and as we just\nlearned, sometimes they can trigger downstream pipelines. To make it easier to understand the\nflow of a pipeline, including its downstream pipelines, GitLab has pipeline graphs for viewing\npipelines and each pipeline's status.\n\n![Service – Finance pipeline](https://about.gitlab.com/images/blogimages/Cross-proj-img4.png){: .shadow.medium.center}\n\nHey community, what else would you like me to explain in a blog post? 
Let me know in the comments or tweet us [@gitlab](https://twitter.com/gitlab).\n\nCover image by [Tian Kuan](https://unsplash.com/@realaxer) on [Unsplash](https://unsplash.com)\n{: .note}\n",[109,9,916,1158,683],{"slug":2125,"featured":6,"template":686},"cross-project-pipeline","content:en-us:blog:cross-project-pipeline.yml","Cross Project Pipeline","en-us/blog/cross-project-pipeline.yml","en-us/blog/cross-project-pipeline",{"_path":2131,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2132,"content":2138,"config":2145,"_id":2147,"_type":14,"title":2148,"_source":16,"_file":2149,"_stem":2150,"_extension":19},"/en-us/blog/custom-actions-rasa-gitlab-devops",{"title":2133,"description":2134,"ogTitle":2133,"ogDescription":2134,"noIndex":6,"ogImage":2135,"ogUrl":2136,"ogSiteName":670,"ogType":671,"canonicalUrls":2136,"schema":2137},"Creating custom action containers for Rasa X with GitLab","Using the GitLab DevOps Platform together with Rasa X can make it easier for stakeholders to deliver a virtual assistant by automating potentially time-consuming, error-prone steps. 
In this case, we’ve shown how you can build Rasa custom action servers and deploy them to Kubernetes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668410/Blog/Hero%20Images/vablog.jpg","https://about.gitlab.com/blog/custom-actions-rasa-gitlab-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Create and Deploy Custom Actions Containers to Rasa X using Gitlab DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2021-04-06\",\n      }",{"title":2139,"description":2134,"authors":2140,"heroImage":2135,"date":2142,"body":2143,"category":769,"tags":2144},"Create and Deploy Custom Actions Containers to Rasa X using Gitlab DevOps Platform",[2141],"William Arias","2021-04-06","\n**This blog post was a collaboration between William Arias, from Gitlab, and Vincent D. Warmerdam, from Rasa. You can find the same blog post on [Rasa's blog](https://blog.rasa.com/create-and-deploy-custom-actions-containers-to-rasa-x-using-gitlab-devops-platform/)**.  \n\n## Create and Deploy Custom Actions Containers to Rasa X using Gitlab DevOps Platform\nVirtual assistants do more than just carry on conversations. They can send emails, make updates to a calendar, or call an API endpoint. Essentially, they can do actions that add significant value and convenience to the user experience.\nIn assistants built with Rasa*, this type of functionality is executed by custom code called custom actions. As with any code you run in production, you’ll need to think about how you want to deploy updates to custom actions. In this blog post, we’ll show you how to set up GitLab to deploy custom action Docker containers to your Kubernetes cluster. 
If we follow [good DevOps practices](/stages-devops-lifecycle/) we can greatly speed up the development and quality of our virtual assistants.\n* Rasa Open Source is a machine learning framework for building text and voice-based virtual assistants. It provides infrastructure for understanding messages, holding conversations, and connecting to many messaging channels and APIs. Rasa X is a toolset that runs on top of Rasa Open Source, extending its capabilities. Rasa X includes key features for sharing the assistant with test users, reviewing and annotating conversation data, and deploying the assistant. [Learn more about Rasa.](https://rasa.com/docs/)\n\n## Deployment high-level overview\nThe typical workflow for deploying a new version of custom actions is outlined below.  \n![actions-process](https://about.gitlab.com/images/blogimages/actions-process.png){: .shadow}\n\nEvery change to your custom actions code will require a new container image to be built and pulled by Rasa X. Gitlab CI/CD can save you from doing a lot of manual work and automate steps like the ones described in the workflow above. Let's see how to do it.  \n\n## Using Rasa with Gitlab DevOps Platform\nLet's create a pipeline that will automate manual steps.\n\n---\n**NOTE**\nThis article assumes you have your [Gitlab Project](https://gitlab.com/warias/gl-commit-2020) with your custom actions code created along with a [Google Kubernetes Cluster](https://cloud.google.com/kubernetes-engine/docs/quickstart).\n\n---\n\nIf you are a Gitlab user you are probably familiar with .gitlab-ci.yml file and its CI/CD capabilities. Every time you commit a change to your custom actions code you want Gitlab to run a script that will build and update your docker containers. 
\n![actions-process-2](https://about.gitlab.com/images/blogimages/process2.png){: .shadow}\n\nLet's breakdown the CI/CD pipeline by describing the gitlab-ci.yml file so you can use it and customize it to your needs\n## Variables\nWe make use of environment variables created in Gitlab at the moment of running the Jobs to define our actions Docker image  \n\n```\nvariables:\n    ACTIONS_CONTAINER_IMAGE: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG\n    TAG: $CI_COMMIT_SHA\n    K8S_SECRET: secret-gitlab-registry\n\n```\n\nThe snippet above does the following:\n- It defines the name of the Docker Image for custom actions using environment variables ```$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG.``` This will make the name of the Docker image different for every commit\n- It creates a secret used to pull the Rasa Action Image from the Gitlab Private Registry to the Google Kubernetes Cluster. \n\n## Stages\nWe have two main stages in our pipeline, build and deploy:\n```\nstages:\n  - build\n  - deploy \n```\nEvery time there is a new commit with changes to our custom actions code, or when we decide to run the CI/CD Pipeline it will:\n- Build: Here, we automate the building of the Docker image using the variables defined above, and the Dockerfile. We also tag the image and push it to the GitLab container registry.\n- Deploy: Here we log-in to Kubernetes Engine on Google Cloud and deploy the newly created Actions image to Rasa X.\nLet's see it in more detail:  \n\n**Build**:\n```\nbuild-actions-image:\n image: docker:19.03.1\n services:\n   - docker:dind\n stage: build\n script:\n   - docker login -u ```$CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY```\n   - docker build -t $ACTIONS_CONTAINER_IMAGE:$TAG -f Dockerfile .\n   - docker push $ACTIONS_CONTAINER_IMAGE:$TAG\n```\nThe job build-actions-image executed on the build stage takes advantage of the CI/CD variables that are part of the environment where the pipelines run. 
It automates the usage of Docker commands to build the Actions image by reading its corresponding Dockerfile. The output of this stage is a new Custom Actions image per every commit with code changes.  \n\n**Deploy**:\n```\ndeploy-custom-action-x:\n  stage: deploy\n  image: crileroro/gcloud-kubectl-helm\n  variables:\n    GCP_PROJECT: gke-project-302411\n    GCP_REGION: europe-west1\n    CLUSTER_NAME: gke-python-demo\n    NAMESPACE_RASA: rasa-environment \n  before_script:\n    - gcloud auth activate-service-account --key-file $SERVICE_ACCOUNT_GCP\n    - gcloud config set project $GCP_PROJECT\n    - gcloud config set compute/region $GCP_REGION\n    - gcloud container clusters get-credentials $CLUSTER_NAME\n  script:\n    - kubectl create ns $NAMESPACE_RASA --dry-run=client -o yaml | kubectl apply -f -\n    - kubectl create secret docker-registry $K8S_SECRET\n              --docker-server=$CI_REGISTRY\n              --docker-username=$CI_DEPLOY_USER\n              --docker-password=$CI_DEPLOY_PASSWORD\n              --namespace $NAMESPACE_RASA\n              -o yaml --dry-run=client | kubectl apply -f -\n    - helm repo add rasa-x https://rasahq.github.io/rasa-x-helm\n    - helm upgrade -i --reuse-values \n                      --namespace $NAMESPACE_RASA\n                      --set app.name=$ACTIONS_CONTAINER_IMAGE\n                      --set app.tag=$TAG \n                      --set images.imagePullSecrets[0].name=$K8S_SECRET rasa-x rasa-x/rasa-x\n```\n\nNotice the variables in ```before_script```, these ones are needed to authenticate to GCP where we have our Kubernetes cluster. This step is optional and could be skipped in cases where you have [Gitlab pre-integrated](https://docs.gitlab.com/ee/user/project/clusters/add_remove_clusters.html) with your Kubernetes cluster running on Google Cloud.  
\n\nThe main and most interesting part of the script is:  \n```\nscript:\n    - kubectl create ns $NAMESPACE_RASA --dry-run=client -o yaml | kubectl apply -f -\n    - kubectl create secret docker-registry $K8S_SECRET\n              --docker-server=$CI_REGISTRY\n              --docker-username=$CI_DEPLOY_USER\n              --docker-password=$CI_DEPLOY_PASSWORD\n              --namespace $NAMESPACE_RASA\n              -o yaml --dry-run=client | kubectl apply -f -\n    - helm repo add rasa-x https://rasahq.github.io/rasa-x-helm\n    - helm upgrade -i --reuse-values \n                      --namespace $NAMESPACE_RASA\n                      --set app.name=$ACTIONS_CONTAINER_IMAGE\n                      --set app.tag=$TAG \n                      --set images.imagePullSecrets[0].name=$K8S_SECRET rasa-x rasa-x/rasa-x\n\n```\n\nWe start by creating the *namespace* for our custom actions code, and if it already exists, then we proceed to apply Kubernetes commands using kubectl and helm.  \n```\nhelm repo add rasa-x https://rasahq.github.io/rasa-x-helm\n    - helm upgrade -i --reuse-values \n                      --namespace $NAMESPACE_RASA\n                      --set app.name=$ACTIONS_CONTAINER_IMAGE\n                      --set app.tag=$TAG \n                      --set images.imagePullSecrets[0].name=$K8S_SECRET rasa-x rasa-x/rasa-x\n```\nThe snippet above adds a rasa-x Helm chart and upgrades or changes the values corresponding to the new **Custom Action Image** by assigning to it the ```$ACTIONS_CONTAINER_IMAGE``` created in the build stage.\nNote that the pipeline described above focuses only on creating and deploying the ACTIONS_CONTAINER_IMAGE. It could be extended by adding more stages, for example, code quality, security testing, and unit testing among others.  \n\n## Summary\nUsing the GitLab DevOps Platform together with Rasa X can make it easier for stakeholders to deliver a virtual assistant by automating potentially time-consuming, error-prone steps. 
In this case, we’ve shown how you can build Rasa custom action servers and deploy them to Kubernetes.\nPushing new custom action containers to Kubernetes only scratches the surface of what you can automate with GitLab. You could also add steps for code quality, security audits and unit tests. The main goal is to automate the manual parts of deployment so that you can focus on what is important. In the case of Rasa X, that means that more time can be spent learning from your users and making a better assistant in the process.\n\nDo you want to learn more? Watch this video of Gitlab DevOps Platform and Rasa [Deploy your Rasa Chatbots like a boss with DevOps](https://youtu.be/ko9-zPDuhQo)\n\nHappy hacking!\n\nCover image by [Eric Krull](https://unsplash.com/@ekrull?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[976,1041,9],{"slug":2146,"featured":6,"template":686},"custom-actions-rasa-gitlab-devops","content:en-us:blog:custom-actions-rasa-gitlab-devops.yml","Custom Actions Rasa Gitlab Devops","en-us/blog/custom-actions-rasa-gitlab-devops.yml","en-us/blog/custom-actions-rasa-gitlab-devops",{"_path":2152,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2153,"content":2159,"config":2165,"_id":2167,"_type":14,"title":2168,"_source":16,"_file":2169,"_stem":2170,"_extension":19},"/en-us/blog/customer-interview-charter-communications",{"title":2154,"description":2155,"ogTitle":2154,"ogDescription":2155,"noIndex":6,"ogImage":2156,"ogUrl":2157,"ogSiteName":670,"ogType":671,"canonicalUrls":2157,"schema":2158},"Better Developer & Customer Experiences with One Application","Director of Product Integration Michael Sobota of Charter Communications shares how they're using GitLab to simplify their toolchain, with big 
results.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663655/Blog/Hero%20Images/gitlab-live-sept-2018.png","https://about.gitlab.com/blog/customer-interview-charter-communications","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Customer story: Driving better developer and customer experiences with a single application\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-09-26\",\n      }",{"title":2160,"description":2155,"authors":2161,"heroImage":2156,"date":2162,"body":2163,"category":299,"tags":2164},"Customer story: Driving better developer and customer experiences with a single application",[1378],"2018-09-26","\nDuring [#GitLabLive](/blog/gitlab-live-event-recap/), customer Michael Sobota,\nDirector of Product Integration at [Charter Communications](https://www.spectrum.com/about.html), joined us to share how adopting\nGitLab as the [single application](/handbook/product/single-application/) for their entire software development lifecycle has brought their\nfeedback loop of two weeks down to a matter of minutes. Charter is an American telecom\ncompany providing services to over 26 million customers in 41 states, and is the second-largest\ncable operator in the US. 
They have 94,000 employees worldwide.\n\nYou can watch the interview with Michael and check out our key takeaways from it below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/HnTPi7y5MVo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Key takeaways\n\n### A single place for all development, operations, and feedback is critical to a great developer experience\n\n Michael: \"It's my job to make sure developers who are providing a digital experience to our\n subscribers have a great developer experience: Helping them realize that vision of quick iterations,\n giving them feedback, shifting left concerns like security and testing, deployments, and getting\n that feedback early in our value stream where it’s cheaper to course correct.\"\n\n\"GitLab has been a cornerstone of our [DevOps platform](/solutions/devops-platform/): using it for source control management,\nfor continuous integration, continuous deployment, a Docker registry, artifacts. We want to give\ndevelopers a single place to get feedback, self-service, and do it in a responsible manner that\nallows us to provide great value to our subscribers.\"\n\n### Quick feedback is also essential to staying competitive\n\nMichael: \"Consumers and subscribers are looking for different, more digital ways to interact\nwith companies and to consume content. Shifting left allows us to be competitive in creating\nthese new, digital ways for consumers to interact with us, whether it’s paying their bill or understanding\nhow their account is set up, ordering a new service, consuming live streaming video, or video on demand.\nCustomers want that quick feedback and to do that we need to shift things left.\"\n\n### Having everything in one place can drastically reduce your feedback loop\n\nMichael: To be able to understand, \"Did my code merge in? Did it build the capacity tests? 
Did it pass\nthe security standards?\" – these things, in a single place, within the merge request, within that\nUI, have helped us cut down our feedback loop that was typically around our sprint cycle of\naround two weeks, down to minutes.\"\n\n\"Gone are the days of managing different build machines. It’s all in the power of the developers,\nand now from the first line of code on every single branch, we can deploy a mutually exclusive\nenvironment and get feedback in minutes down from that two-week cycle. Now, almost every\nsingle branch of code can have a deployment, and you can have feedback as a developer, as a\nproduct owner, or as a designer, right away.\"\n",[9,1829,683],{"slug":2166,"featured":6,"template":686},"customer-interview-charter-communications","content:en-us:blog:customer-interview-charter-communications.yml","Customer Interview Charter Communications","en-us/blog/customer-interview-charter-communications.yml","en-us/blog/customer-interview-charter-communications",{"_path":2172,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2173,"content":2179,"config":2184,"_id":2186,"_type":14,"title":2187,"_source":16,"_file":2188,"_stem":2189,"_extension":19},"/en-us/blog/dag-manual-fix",{"title":2174,"description":2175,"ogTitle":2174,"ogDescription":2175,"noIndex":6,"ogImage":2176,"ogUrl":2177,"ogSiteName":670,"ogType":671,"canonicalUrls":2177,"schema":2178},"How to use manual jobs with `needs:` relationships","Are you using manual jobs and needs relationship in your CI/CD pipeline? 
Learn more about the fix that might cause your pipeline to behave differently.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683170/Blog/Hero%20Images/blog_cover2.png","https://about.gitlab.com/blog/dag-manual-fix","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use manual jobs with `needs:` relationships\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-05-20\",\n      }",{"title":2174,"description":2175,"authors":2180,"heroImage":2176,"date":2181,"body":2182,"category":791,"tags":2183},[2022],"2021-05-20","\n\n## A bug when job `needs` a manual job\n\nIn [13.12 we fixed a bug](https://gitlab.com/gitlab-org/gitlab/-/issues/31264) that might affect the existing behavior of your pipeline. We explain why we had to fix the bug, the possible impact of this change on your pipeline, and the proposed workaround if you would like to revert this behavior.\n\n## Background on a two-job pipeline\n\nIn GitLab CI/CD you can easily configure a job to require manual intervention before it runs. The job gets added to the pipeline, but doesn't run until you click the **play** button on it.\n\nLet's look at a two-job pipeline:\n\n```yaml\nstages:\n  - stage1\n  - stage2\n\njob1:\n  stage: stage1\n  script:\n    - echo \"this is an automatic job\"\n\nmanual_job:\n  stage: stage2\n  script:\n    - echo \"This is a manual job which doesn't start automatically, and the pipeline can complete without it starting.\"\n  when: manual # This setting turns a job into a manual one\n```\n\nThis is how it looks when we look at the pipeline graph:\n\n![image2](https://about.gitlab.com/images/blogimages/11-05-2021-when-job-needs-manual/blog1.png){: .shadow.medium.center.wrap-text}\n\nNotice that the manual job gets skipped, and the pipeline completes successfully even though the manual job did not get triggered. 
This happens because manual jobs are considered optional, and do not need to run.\n\nInternally, manual jobs have `allow_failure` set to true by default, which means that these skipped manual jobs do not cause a pipeline failure. The YAML code below demonstrates how to write the manual job, which results in the same behavior. The job doesn't automatically start, is skipped, and the pipeline passes.\n\n```yaml\nmanual_job:\n  stage: stage2\n  script:\n    - echo \"This is a manual job which doesn't start automatically, and the pipeline can complete without it starting.\"\n  when: manual\n  allow_failure: true # this line is redundant since manual job has this setting by default\n```\n\nYou can set `allow_failure` to true for any job, including both manual and automatic jobs, and then the pipeline does not care if the job runs successfully or not.\n\n### How to expand the configuration with `needs` (DAG)\n\n  Last year we introduced the [`needs` keyword which lets you create a Directed Acyclic Graphs (DAG) to speed up your pipeline](https://docs.gitlab.com/ee/ci/yaml/#needs). 
The `needs` keyword creates a dependency between two jobs regardless of their stage.\n\nLet's look at this example:\n\n```yaml\nstages:\n  - stage1\n  ....\n  - stage10\n\njob1: # this is the first job that runs in the pipeline\n  stage: stage1\n  script:\n    - echo \"exit 0\"\n.....\n\njob10:\n  needs:  # Defined a \"needs\" relationship with job1\n    - job1\n  stage: stage10\n  script:\n    - echo \"This job runs as soon as job1 completes, even though this job is in stage10.\"\n```\n\nThe `needs` keyword creates a dependency between the two jobs, so `job10` runs as soon as `job1` **finishes running** successfully, regardless of the stage ordering.\n\nSo what happens if a job `needs` a manual job, that doesn't start running automatically?\n\nLet's look at the following example:\n\n```yaml\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script: exit 0\n\ntest:\n  stage: test\n  when: manual\n  script: exit 0\n\ndeploy:\n  stage: deploy\n  script: echo \"when should this job run?\"\n  needs:\n    - test\n```\n\nBefore 13.12, this type of configuration would cause the pipeline to get stuck. The `deploy` job can only start when the `test` job completes, but the `test` job does not start automatically. The rest of the pipeline stops and waits for someone to run the manual `test` job.\n\n![image3](https://about.gitlab.com/images/blogimages/11-05-2021-when-job-needs-manual/blog2.png){: .shadow.medium.center.wrap-text}\n\nThis behavior is even worse with larger pipelines:\n\n![image4](https://about.gitlab.com/images/blogimages/11-05-2021-when-job-needs-manual/blog3.png){: .shadow.medium.center.wrap-text}\n\nThe example above shows there is a needs relationship between `post test` job and the `test` job (which is a manual job) as you can see the pipeline is stuck in a running state and any subsequent jobs will not run.\n\nThis was not the behavior most users expected, so we improved it in 13.12. 
Now, if there is a `needs` relationship pointing to a manual job, the pipeline doesn't stop by default anymore. The manual job is considered optional by default in all cases now. Any jobs that have a `needs` relationship to manual jobs are now also considered optional and skipped if the manual job isn't triggered. If you start the manual job, the jobs that need it can start after it completes.\n\nNote that if you start the manual job before a later job that has it in a `needs` configuration, the later job will still wait for the manual job to finish running.\n\n## What if I don't want this new behavior?\n\nOne of the reasons we selected this solution is that you can quickly revert this change. If you made use of this inadvertent behavior and configured your pipelines to use it to block on manual jobs, it's easy to return to that previous behavior. All you have to do is override the default `allow_failure` in the manual job with `allow_failure: false`. This way the manual job is no longer optional, and the pipeline status will be marked as blocked and wait for you to run the job manually.\n\n```yaml\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script: exit 0\n\ntest:\n  stage: test\n  when: manual\n  allow_failure: false  # Set to false to return to the previous behavior.\n  script: exit 0\n\ndeploy:\n  stage: deploy\n  script: exit 0\n  needs:\n    - test\n```\n\nShare any thoughts, comments, or questions, by opening an issue in GitLab and mentioning me (`@dhershkovitch`).\n",[976,1243,9],{"slug":2185,"featured":6,"template":686},"dag-manual-fix","content:en-us:blog:dag-manual-fix.yml","Dag Manual 
Fix","en-us/blog/dag-manual-fix.yml","en-us/blog/dag-manual-fix",{"_path":2191,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2192,"content":2198,"config":2203,"_id":2205,"_type":14,"title":2206,"_source":16,"_file":2207,"_stem":2208,"_extension":19},"/en-us/blog/defend-cicd-security",{"title":2193,"description":2194,"ogTitle":2193,"ogDescription":2194,"noIndex":6,"ogImage":2195,"ogUrl":2196,"ogSiteName":670,"ogType":671,"canonicalUrls":2196,"schema":2197},"Defending the CI/CD pipeline","Speed to launch often comes at the cost of security – but it doesn’t have to. Here are four ways to achieve both by using a CI/CD pipeline","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678499/Blog/Hero%20Images/defend-cicd-security.jpg","https://about.gitlab.com/blog/defend-cicd-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Defending the CI/CD pipeline\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-11-19\",\n      }",{"title":2193,"description":2194,"authors":2199,"heroImage":2195,"date":2200,"body":2201,"category":679,"tags":2202},[1016],"2019-11-19","\n[CI/CD](/topics/ci-cd/) is a way to release software as quickly as possible, which, unfortunately, often comes at the expense of security. [Synopsys and \n451 Research found](https://www.synopsys.com/blogs/software-security/security-challenges-cicd-workflows/) \nthe most significant [application security](/topics/devsecops/) challenges in CI/CD workflows \ninclude a lack of automated, integrated security testing tools, inconsistent \nmethods, slowed workflows, and too many false positives.\n\nThere’s also the challenge of securing the pipeline itself. Traditional and \nmanual security practices can’t scale to the level of CI/CD – the resulting delivery pipelines expand a company’s attack surface by a significant measure. 
The pipeline represents an end-to-end lifecycle for your software which makes it a \nprime target for hackers. It's clear [CI/CD security](/solutions/security-compliance/) can’t be an afterthought. DevOps teams \nmust bring security issues to the forefront of their considerations throughout the SDLC. \n\n## Security risks in enterprise CI/CD\n\nCI/CD significantly broadens your attack surface with a lengthy list of \ncomponents – repositories, servers, containers, and for those who don’t use \nGitLab, a wide array of tools. A large number of moving pieces presents a \ntempting ROI for hackers – one compromised segment of the ecosystem could open \nup the entire infrastructure for exploitation. [As tech journalist Twain Taylor \nexplains](https://thenewstack.io/the-biggest-security-risks-lurking-in-your-ci-cd-pipeline/), \nsecuring the CI/CD pipeline is not a straightforward process. Teams need to study the \npipeline, understand what information the pipeline ingests, uncover any major \nvulnerabilities and find ways to eliminate those risks.\n\nAlso, tools that lack transparency, require frequent switching \nbetween platforms, and inhibit the overall workflow are less likely to be \nadopted – and more likely to be worked around. Workarounds can create friction in the pipeline which can mean inconsistent \ntesting and remediation, all of which can allow more vulnerabilities to make their way \nthrough to production and launch.\n\n## Defending against CI/CD pipeline risks\n\nSecure CI/CD can be achieved through [DevSecOps](/topics/devsecops/) but you’ll need a mature CI/CD solution to get you there. In addition to the \nstability of the solution, your lifecycle ecosystem must be well-maintained and \neasily monitored for suspicious activity. 
Four of the most important aspects of \na secure CI/CD pipeline are automation, access management, positive user \nexperience, and transparency.\n\n### Automation\n\nAutomation, at the very least, should allow you to bring your security \npractices (especially [testing](/stages-devops-lifecycle/application-security-testing/)) \nup to the speed and scale of CI/CD. The value of automation magnifies when \nprocesses are standardized across teams and organizations. By introducing \nrepeatability to your projects, you’re also creating expected functionality and operations within your pipeline. When there are behaviors \nor activities that don’t align to the expected, a red flag will be triggered alerting developers to potential threats.\n\n### Access management\n\nAccess rights should be considered for both human-to-tool and tool-to-tool \ninteractions. [Tripwire recommends](https://www.tripwire.com/state-of-security/devops/security-ci-cd-pipeline-flowing/) \nrequiring authentication for anyone to push changes to the pipeline, \nimplementing login tracking, and confirming that builds reside on secure \nservers. \n\nCommunication between tools and components should be carefully managed \nto ensure that access is only granted on an as-needed basis. The New Stack's Twain also notes it’s important to consider what secrets are contained in pipeline scripts. He recommends removing any keys, credentials, and secrets from scripts and \nprotecting them with trusted secrets managers. He also suggests implementing \naccess control across your entire toolchain to revoke anything anonymous or shared, and to regularly audit the controls across the \necosystem. \n\n### User experience\n\nSeamless integration between tools will make a night-and-day difference in \nsecuring your CI/CD pipeline (alternatively, you could also use [a single tool \nfor the entire lifecycle](/handbook/product/single-application/)). 
\nEven though security is gaining traction in the minds of non-security \nprofessionals, it still remains a challenge for many development teams. Provide \ndevelopers with tools and practices that are standard across the organization, \nand reduce friction between tools as much as possible. \n\nWith lower barriers to \nadoption, your team will be less likely to create workarounds that could \njeopardize your business or customers. Providing users with immediate \nfeedback on the security of their code will enable them to remediate on the \nspot and serve an educational purpose, showing developers what to watch out \nfor when writing code. \n\n### Transparency\n\nIt's vital to have a view into what happens throughout the CI/CD pipeline. Maintain a single source of truth that logs every change – \nas well as its origin – and include functionality that allows sign-off for any \nhigh-stakes updates. Transparency also builds accountability among team members, \nreinforcing the idea that everyone is responsible for security. Lastly, \ntransparency is crucial to your team communication strategy. Methodologies and \nknowledge should be communicated openly and thoroughly, so that everyone on the \nteam understands how to apply best practices and what the intended outcomes are.\n\n## Speed and security: No longer a paradox\n\nEach of the above steps will help your security efforts shift left in the \nSDLC. Moving it all earlier in the process will enable you to release secure, quality software at the \nspeed of the business. This can only happen if there is true collaboration between development, operations, \nand security. 
Set policies and standard practices, understand respective \ngoals, and foster a culture of responsibility for the software as a \nwhole – and not just one facet of its creation or performance.\n\n## The security benefits of a single CI/CD tool for the entire lifecycle\n\nIt’s extremely important to use established tools that have been thoroughly \nvetted by both your internal teams and the market at large. That being said, \nfinding the best-in-class tools for every phase of the lifecycle and then \nsuccessfully (and securely) stringing them together can be a nightmare and result in untold technical debt. A single CI/CD tool relieves much of \nthat burden, by eliminating unnecessary platform switching and enabling high \ntransparency throughout the pipeline. With GitLab in particular, security \nchecks are embedded within the development workflow, which both reduces \nfriction for developers and provides a single source of truth for the entire \npipeline.\n\nRegardless of your tool (or tools) of choice, it’s critical that you and your \nteam prioritize security in all aspects of work.\n\nCover image by [Boban Simonovski](https://unsplash.com/@3031n) on [Unsplash](https://unsplash.com/photos/akQ06aB6MfM)\n{: .note}\n",[109,9,875],{"slug":2204,"featured":6,"template":686},"defend-cicd-security","content:en-us:blog:defend-cicd-security.yml","Defend Cicd Security","en-us/blog/defend-cicd-security.yml","en-us/blog/defend-cicd-security",{"_path":2210,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2211,"content":2217,"config":2223,"_id":2225,"_type":14,"title":2226,"_source":16,"_file":2227,"_stem":2228,"_extension":19},"/en-us/blog/deploy-aws",{"title":2212,"description":2213,"ogTitle":2212,"ogDescription":2213,"noIndex":6,"ogImage":2214,"ogUrl":2215,"ogSiteName":670,"ogType":671,"canonicalUrls":2215,"schema":2216},"How to deploy to AWS with GitLab","We believe deploying to the cloud should be easy and boring. 
The deployment process is the same regardless of what tech stack you're using so why not automate it?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672124/Blog/Hero%20Images/aws_rocket.jpg","https://about.gitlab.com/blog/deploy-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy to AWS with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2020-12-15\",\n      }",{"title":2212,"description":2213,"authors":2218,"heroImage":2214,"date":2220,"body":2221,"category":791,"tags":2222},[2219],"Orit Golowinski","2020-12-15","\nCloud computing services are replacing traditional hardware technologies at an extremely fast pace. The majority of businesses worldwide are already moving their applications to the cloud — both public and private cloud — or plan to in the near future. Over a short period of time, this technology took over the market as businesses preferred remote access to data as well as the cloud's scalability, economy, and reach.\n\n## AWS Deployment: deploying applications to the cloud\n\nCOVID-19 and the resulting trend toward remote work forced organizations to adopt cloud technologies even if they hadn’t planned to originally. Software deployment to the cloud has also increased. Cloud is no longer just virtual machines, organizations are driving the use of [Containers as a Service (CaaS)](https://searchitoperations.techtarget.com/definition/Containers-as-a-Service-CaaS) due to their growing interest in leveraging containers to ease development and testing, speed up deployment, scale operations, and increase the efficiency of workloads running in the cloud.\n\nSince deployment to the cloud has become a standard practice, at GitLab we want to make this repeatable and [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions). 
In this blog post, we explain how we've made it easier to deploy to Amazon Web Services (AWS) as part of your deployment process. We invite users to replicate this example to deploy to other cloud providers in a similar way.\n\nSince we want cloud deployment to be as flexible as possible (similar to a microservices architecture), we constructed atomic Docker images that function as building blocks. Users can use these images as part of their custom `gitlab-ci.yml` file or use our predefined `.gitlab-ci.yml` templates. We also added the ability to use [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) with the new AWS deployment targets.\n\n## AWS Deployment: how to use GitLab's official AWS Docker Images\n\n### AWS CLI Docker image\nIn [GitLab 12.6](/releases/2019/12/22/gitlab-12-6-released/), we provided an official GitLab [AWS cloud-deploy](https://gitlab.com/gitlab-org/cloud-deploy/-/blob/master/aws/cloud_deploy/Dockerfile) Docker image that downloads and installs the [AWS CLI](https://aws.amazon.com/cli/). This allows users to run `aws` commands directly from their pipelines. For more information, see [Run AWS commands from GitLab CI/CD](https://docs.gitlab.com/ee/ci/cloud_deployment/#run-aws-commands-from-gitlab-cicd).\n\n### CloudFormation stack creation Docker image\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/), we provided a Docker image that runs a script that [creates a stack with CloudFormation](https://gitlab.com/gitlab-org/cloud-deploy/-/blob/master/aws/src/bin/gl-cloudformation). The `gl-cloudprovision create-stack` uses [aws cloudformation create-stack](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) behind the scenes. A JSON file based on the CloudFormation template must be passed to that command. For an example of this type of JSON file, see [`cf_create_stack.json`](https://gitlab.com/ebaque/jekyll-demo/-/blob/deploy-to-ec2/aws/cf_create_stack.json). 
With this type of JSON file, the command creates the infrastructure on AWS, including an EC2 instance directly from the `.gitlab-ci.yml` file. The script exits once we get confirmation that the stack setup is complete or has failed (through periodic polling).\n\n### Push to S3 and Deploy to EC2 Docker image\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/) we also provided a Docker image with [Push to S3 and Deploy to EC2 scripts](https://gitlab.com/gitlab-org/cloud-deploy/-/blob/master/aws/src/bin/gl-ec2). The `gl-ec2 push-to-s3` script pushes source code to an S3 bucket. For an example of the JSON file to pass to the `aws deploy push` command, see [`s3_push.json`](https://gitlab.com/ebaque/jekyll-demo/-/blob/deploy-to-ec2/aws/s3_push.json). This code can be whatever artifact is built from a preceding build job. The `gl-ec2 deploy-to-ec2` script uses `aws deploy create-deployment` behind the scenes to create a deployment to an EC2 instance directly from the `.gitlab-ci.yml` file. For an example of the JSON template to pass, see [`create_deployment.json`](https://gitlab.com/ebaque/jekyll-demo/-/blob/deploy-to-ec2/aws/create_deployment.json). The script ends once we get confirmation that the deployment has succeeded or failed (via polling).\n\n## AWS Deployment: using GitLab CI templates to deploy to AWS\n\n### How to deploy to Elastic Container Service (ECS) with GitLab\nIn [GitLab 12.9](/releases/2020/03/22/gitlab-12-9-released/), we created a full `.gitlab-ci.yml` template called [`Deploy-ECS.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Deploy/ECS.gitlab-ci.yml) that deploys to Amazon ECS and extends support for Fargate. Users can include the template in their configuration, specify a few variables, and their application will be deployed and ready to go in no time. This template can be customized for your specific needs. 
For example: Replacing the selected container registry, changing the path of the file location, etc.\n\n### How to deploy to Elastic Compute Cloud (EC2) with GitLab\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/), we created a full `.gitlab-ci.yml` template called [`CF-Provision-and-Deploy-EC2.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Deploy/EC2.gitlab-ci.yml) that provisions the infrastructure by leveraging [AWS CloudFormation](https://aws.amazon.com/cloudformation/). It then pushes your previously-built artifact to an [AWS S3 bucket](https://aws.amazon.com/s3/) and deploys the pushed content to [AWS EC2](https://aws.amazon.com/ec2/).\n\n## AWS Deployment: security considerations\n\n### Predefined AWS CI/CD variables\n\nIn order to deploy to AWS, you must use AWS security keys to connect to your AWS instance. Users can define these security keys as [CI/CD environment](/topics/ci-cd/) variables that can be used by the deployment pipeline.\n\nIn [GitLab 12.9](/releases/2020/03/22/gitlab-12-9-released/), we added support for predefined AWS variables. This support function helps users know which variables are required for deploying to AWS and also prevents typos and spelling mistakes.\n\n| Env. variable name | Value|\n| --- | --- |\n| `AWS_ACCESS_KEY_ID` | Your Access key ID |\n| `AWS_SECRET_ACCESS_KEY` | Your Secret access key |\n| `AWS_DEFAULT_REGION` | Your region code |\n\n### \"Just-in-time\" guidance for AWS deployments\n\n[GitLab 13.1](/releases/2020/06/22/gitlab-13-1-released/) provides just-in-time guidance for users who wish to deploy to AWS. Setting up AWS deployments isn't always as easy as we'd like it to be, so we've added in-product links to our AWS templates and documentation when you start adding AWS CI/CD variables to make it easier for you to use our AWS features. 
This will help you get up and running faster.\n\n![In-product guidance for AWS](https://about.gitlab.com/images/blogimages/aws_guide.png)\n\nAWS guide from CI/CD variables\n\n### Added security for GitLab's official AWS Docker images\n\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/), we changed the image identifier from the release version number to the Docker image digest. Docker supports immutable image identifiers and we adopted this best practice to update our cloud-deploy images. When a new image is tagged, we also programmatically retrieve the image digest upon its build and create a release note to effectively communicate this digest to users. This guarantees that every instance of the service runs exactly the same code. You can roll back to an earlier version of the image, even if that version wasn't tagged (or is no longer tagged). This can even prevent race conditions if a new image is pushed while a deploy is in progress.\n\n![Docker Image Digest](https://about.gitlab.com/images/blogimages/digest1.png)\n\nDocker image digest or release tag\n\n## AWS Deployment: auto DevOps support\n\nGitLab already supports Kubernetes users deploying to an AWS EKS cluster. Click the link to read instructions about [how to deploy an application to a GitLab-managed Amazon EKS cluster with Auto DevOps](/blog/deploying-application-eks/#:~:text=The%20Auto%20DevOps%20function%20at,build%2C%20and%20deploy%20your%20application).\n\nWe also expanded Auto DevOps to support non-Kubernetes users. Users can specify their deployment target by adding the `AUTO_DEVOPS_PLATFORM_TARGET` variable under the CI/CD variables settings. 
Specifying the deployment target platform builds a full CI/CD pipeline that deploys to AWS targets.\n\nWe currently support:\n\n- `AUTO_DEVOPS_PLATFORM_TARGET: ECS` (added in GitLab 13.0)\n- `AUTO_DEVOPS_PLATFORM_TARGET: FARGATE` (added in GitLab 13.2)\n- `AUTO_DEVOPS_PLATFORM_TARGET: EC2` (added in GitLab 13.6)\n\nFor more information about Auto DevOps for AWS targets, see [requirements for Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/requirements.html) documentation.\n\nHere's a quick recording for how to use Auto Deploy to Amazon ECS:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/HzRhLLFlAos\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nSpeed run on how to use auto deploy to EC2 (animation):\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/rVr-vZfNL6U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## AWS Deployment: Future plans to extend deployment support via GitLab\n\nCheck out some of the open issues below to see what our plans are for the future of deploying to AWS using GitLab.\n\n- [Show AWS deployment success code in logs](https://gitlab.com/gitlab-org/gitlab/-/issues/215333): This will bring the success/failure codes from AWS into your GitLab pipeline logs, allowing you to see the deployment success code without needing to go into the AWS console to retrieve the logs.\n- [Show AWS deployment success code in pipeline view](https://gitlab.com/gitlab-org/gitlab/-/issues/232983): This will bring the success/failure codes from AWS into your GitLab pipeline, allowing you to see if the deployment job was successful in one view.\n- [Auto Deploy to AWS S3](https://gitlab.com/gitlab-org/gitlab/-/issues/219087): This will expand the supported deployment targets covered in this blog to include [S3 buckets](https://aws.amazon.com/s3/) as well.\n- [AWS integration per-environment 
role management](https://gitlab.com/gitlab-org/gitlab/-/issues/27107): This returns a set of temporary security credentials you can use to access AWS resources that you normally might not be able to access. This is accomplished by using the [AWS IAM](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) roles.\n\n## More material on deploying to EKS and Lambda\n\n- [Demo of how to deploy to EKS](https://docs.google.com/presentation/d/1iXnB6lvTx2_-_0ASElLUDZwyFPWILCRx54XjJkMFuw0/edit#slide=id.g6bb36a7017_2_42).\n- [Whitepaper on how to deploy on AWS from GitLab](/resources/whitepaper-deploy-aws-gitlab/).\n\nWe invite you to contribute to our other cloud provider solutions:\n\n- [Streamline GCP deployments](https://gitlab.com/groups/gitlab-org/-/epics/2706).\n- [Streamline Azure deployments](https://gitlab.com/groups/gitlab-org/-/epics/4846).\n\nAt GitLab, [everyone can contribute](/company/strategy/#contribute-with-gitlab). If you want to deploy to a target that isn't mentioned in this post, please let us know by adding an issue and linking it to our [Natively support hypercloud deployments](https://gitlab.com/groups/gitlab-org/-/epics/1804) epic.\n\nCover image by [SpaceX](https://unsplash.com/photos/uj3hvdfQujI) on [Unsplash](https://www.unsplash.com)\n",[1041,9,976,977],{"slug":2224,"featured":6,"template":686},"deploy-aws","content:en-us:blog:deploy-aws.yml","Deploy Aws","en-us/blog/deploy-aws.yml","en-us/blog/deploy-aws",{"_path":2230,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2231,"content":2237,"config":2244,"_id":2246,"_type":14,"title":2247,"_source":16,"_file":2248,"_stem":2249,"_extension":19},"/en-us/blog/developer-relations-at-gitlab-what-weve-learned-since-our-start",{"title":2232,"description":2233,"ogTitle":2232,"ogDescription":2233,"noIndex":6,"ogImage":2234,"ogUrl":2235,"ogSiteName":670,"ogType":671,"canonicalUrls":2235,"schema":2236},"Developer Relations at GitLab: What we've learned since our start","DevRel is key 
to success for many tech companies. Find out how GitLab's DevRel program has evolved to stay aligned with the industry and our customers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672008/Blog/Hero%20Images/AdobeStock_204527293.jpg","https://about.gitlab.com/blog/developer-relations-at-gitlab-what-weve-learned-since-our-start","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developer Relations at GitLab: What we've learned since our start\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Coghlan\"}],\n        \"datePublished\": \"2024-03-13\"\n      }",{"title":2232,"description":2233,"authors":2238,"heroImage":2234,"date":2239,"body":2240,"category":2241,"tags":2242},[1686],"2024-03-13","Earlier this year, a tweet (are they still called that?) by [Kelsey Hightower](https://twitter.com/kelseyhightower) sparked discussion on social media and internally at GitLab. \n\n![Kelsey Hightower tweet](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678041/Blog/Content%20Images/Screenshot_2024-03-08_at_8.19.09_AM.png)\n\nAt first, Kelsey's response might seem a bit flippant, but there’s an underlying truth to it: Developer Relations (short: DevRel) – and other business functions – must meet the needs of the business and your customers. However, what your stakeholders and customers need will be different in the future. Therefore, to be successful, you have to iterate to stay aligned with them. \n\nReflecting back on my five years working in Developer Relations (formerly known as Community Relations) at GitLab, our team has continuously evolved to stay aligned with the needs of our customers, our community, and the business. 
GitLab CEO and founder Sid Sijbrandij explains how North Star Metrics evolve in his blog post on goal-setting for startups: [Artificially constraining your company to one goal creates velocity and creativity](https://opencoreventures.com/blog/2023-06-05-artificially-constrain-one-goal-to-create-creativity-velocity/). He details the shift from attention to active users to revenue to profit. The evolution of DevRel at GitLab in many ways maps to that same journey.\n\n![What is DevRel - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678041/Blog/Content%20Images/image1.png)\n\n## Early DevRel at GitLab\n\nWhen I joined GitLab in 2018, our team was largely made up of Community Advocates, an Evangelist Program Manager (me), a Code Contributor program manager, and a director. The Community Advocates were tasked with monitoring and engaging with GitLab community members across various online channels but primarily [Hacker News](https://handbook.gitlab.com/handbook/marketing/developer-relations/developer-evangelism/hacker-news/) and Twitter. Answering questions and creating issues based on comments served to increase awareness and attention for GitLab. In addition, users learned that their questions would be answered and feedback was being heard and, frequently, acted on.\n\nAt the same time, the Code Contributor program and Evangelist program were driving growth and interest in GitLab by helping our contributors navigate the contribution process, organizing events and meetups to connect our community, and deepening our relationship with our community champions, also known as [GitLab Heroes](https://about.gitlab.com/community/heroes/). \n\nFor companies in early stages, this is how DevRel often looks. 
The key tactics in this phase are:\n- use low-cost tools (blogs and social media) to drive attention\n- capitalize on people’s interest to deepen relationships and create advocates and champions\n- smooth the pathways to contribute or discover content\n\n> **Tip:** Direct engagement with your community through social media and online forums drives awareness, builds trust, and increases the quality and volume of feedback on your product. \n\n## Expanding DevRel's reach \n\nNext, we ramped up programs like GitLab for Open Source and GitLab for Education. These programs helped attract to our platform key open source projects and many large academic institutions, both with large numbers of engaged users. More users meant more feedback to help us improve the product and more contributors. \n\nAs attention grew and the breadth and depth of our platform increased, we needed to better enable our customers to leverage the capabilities of GitLab’s DevSecOps Platform. This stage roughly maps to the revenue North Star Metric. To drive greater awareness and adoption, the Community Relations team underwent a critical change.\n\n> **Tip:** When looking to grow your active users, engage with partners who can bring their community to your product or platform. This strategy is often overlooked but can be a big boost to awareness and growth, setting you up for success. \n\n## Deepening the DevRel bench\n\nAs our next move, we formed a team of technical experts, known as Developer Evangelists. This team engaged in more traditional DevRel practices, those that might come to mind when asking yourself “What is DevRel?”. 
Internally, we referred to this team’s role as the three Cs: \n- Content creation - creating blog posts, technical talks, demos, and other content to enable our customers\n- Community engagement - engaging online and at events with our customers and community\n- Consulting - serving as internal advocates for and experts on the wider GitLab community\n\nHaving technical experts who could connect directly with customers and escalate that feedback internally helped improve the feedback loop between users and product teams. This team also deeply understood GitLab users, which improved the company's ability to enable our customers and community through content.\n\n> **Tip:** Early in your company journey, executives, product managers, and engineers play a vital role in engaging with community. As the number of users grows, you’ll need technical experts on your team who can directly engage with users and ensure customer feedback reaches key stakeholders (executives and product owners).\n\n## Continuously evolving DevRel at GitLab\n\nOver the past year, the team has evolved again.\n\n- A new vice president joined our team and has helped us become more strategic and better aligned cross-functionally.\n\n- A Contributor Success team was established to better engage and align with our customers around contributions to GitLab. Evolving from a one-person function to a full-fledged team of engineers with deep experience in open source (including multiple past contributors to GitLab), this team continuously improves the contribution experience and engages directly with customers who wish to contribute.\n\n- We updated our team name and many of our team members’ job titles to align with industry standards.\n\n- And we’ve all ramped up quite a bit on AI, perhaps you’ve heard of [GitLab Duo](https://about.gitlab.com/gitlab-duo/)? \n\nAs GitLab continues to mature as a public company, the team will continue to evolve. 
Through these changes, we will stay focused on increasing the efficiency and impact of our efforts for our customers, our product, and our team.\n\n## Gaining - and maintaining - executive buy-in\n\nExecutive buy-in is essential for DevRel. Look at the companies with the largest, most engaged communities and you will find that those companies also have the most active, engaged, and often highly respected founders and CEOs. This is certainly true with GitLab. \n\nGitLab’s engagement with our community began before we were even a company when Dmitriy Zaporozhets (DZ) started the open source GitLab project with [this commit](https://gitlab.com/gitlab-org/gitlab-foss/commit/9ba1224867665844b117fa037e1465bb706b3685). The engagement continued when Sid [launched GitLab on Hacker News](https://news.ycombinator.com/item?id=4428278).\n\nThe importance of community in GitLab’s success cannot be overstated, and while we’ve grown to heights that few companies reach, contributions from our customers and community remain central in [our strategy](https://handbook.gitlab.com/handbook/company/strategy/#dual-flywheels). Because of this, team members, from the highest levels of GitLab and throughout our organization, remain in active communication with our customers via issues and social forums, working hard at all times to help them succeed. Transparency is key here. Documenting our DevRel strategies in the [public GitLab handbook](https://handbook.gitlab.com/handbook/marketing/developer-relations/) enables everyone to contribute.\n\n> **Tip:** Executive support is critical when building a community.\n\n## So what is DevRel?\n\nI want to go back to the initial question that sparked this blog: What is DevRel? 
\n\nI’ll leave you with a quote from Emilio Salvador, vice president of Developer Relations at GitLab, which was recently merged to [our handbook page](https://handbook.gitlab.com/handbook/marketing/developer-relations): \n\n\u003Ci>\"Developer Relations (short: DevRel) operates at the intersection of technology, community, and advocacy, serving as the voice and ears of GitLab in the wider tech world. Their core mission revolves around nurturing and sustaining a vibrant, engaged community of developers, contributors, and users. This involves a multifaceted approach that includes creating educational content, organizing events and workshops, developing programs, and providing platforms for knowledge exchange and collaboration. The team not only focuses on promoting GitLab’s features and capabilities but also actively listens to and incorporates feedback from the community to inform product development and improvements.\"\u003C/i>\n\nThat’s what it is today, but if the history of DevRel at GitLab is any indication, I expect that we’ll continue to iterate going forward. 
\n\n> [Join our Discord community](https://discord.gg/gitlab) to continue the conversation.\n","culture",[9,2243,728],"DevSecOps",{"slug":2245,"featured":91,"template":686},"developer-relations-at-gitlab-what-weve-learned-since-our-start","content:en-us:blog:developer-relations-at-gitlab-what-weve-learned-since-our-start.yml","Developer Relations At Gitlab What Weve Learned Since Our Start","en-us/blog/developer-relations-at-gitlab-what-weve-learned-since-our-start.yml","en-us/blog/developer-relations-at-gitlab-what-weve-learned-since-our-start",{"_path":2251,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2252,"content":2258,"config":2263,"_id":2265,"_type":14,"title":2266,"_source":16,"_file":2267,"_stem":2268,"_extension":19},"/en-us/blog/developers-write-secure-code-gitlab",{"title":2253,"description":2254,"ogTitle":2253,"ogDescription":2254,"noIndex":6,"ogImage":2255,"ogUrl":2256,"ogSiteName":670,"ogType":671,"canonicalUrls":2256,"schema":2257},"4 Ways developers can write secure code with GitLab","GitLab Secure is not just for your security team – it’s for developers too. Learn four ways to write secure code with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666895/Blog/Hero%20Images/developers-write-secure.jpg","https://about.gitlab.com/blog/developers-write-secure-code-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 Ways developers can write secure code with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-09-03\"\n      }",{"title":2253,"description":2254,"authors":2259,"heroImage":2255,"date":2260,"body":2261,"category":679,"tags":2262},[1016],"2019-09-03","\nWriting secure code is a standard part of day-to-day development work, but\nsecurity often appears to be a roadblock instead of a critical piece of the\npuzzle. 
To make security efforts easier, [GitLab Secure](/stages-devops-lifecycle/secure/)\noffers a number of different tools that help developers identify and remediate vulnerabilities\nwithin their code, _as they’re writing it_. Our goal is to seamlessly integrate\nsecurity into your code writing practices so you’re better able to protect\nyour business from growing cybersecurity threats.\n\n## Testing\n\nThere are a variety of testing tools available to developers within GitLab.\nGenerally, they alert developers to vulnerabilities within their code and report\nthem within the merge request so developers can adjust their code as they\ngo. In addition to the testing methods outlined below, developers can also [use\nother tools outside of GitLab](https://handbook.gitlab.com/handbook/product/gitlab-the-product/#plays-well-with-others) by integrating\nthe results of your scanners with our merge request security reports.\n\n### Static application security testing\n\nOur [static application security testing](https://docs.gitlab.com/ee/user/application_security/sast/index.html)\n(SAST) tool scans the application source code\nand binaries to spot potential vulnerabilities before deployment. It uses open\nsource tools that are installed as part of GitLab. Vulnerabilities are shown\nin-line with every merge request and results are collected and presented as a\nsingle report.\n\n### Secret detection\n\n[Secret detection](https://docs.gitlab.com/ee/user/application_security/sast/#secret-detection)\nwithin GitLab is able to detect secrets and credentials that\nhave been unintentionally pushed to the repository. This check is performed by\na specific analyzer during the SAST job, runs regardless of the programming\nlanguage of your app, and displays results within the SAST report.\n\n### Dynamic application security testing\n\nOur [DAST tool](https://docs.gitlab.com/ee/user/application_security/dast/index.html)\nanalyzes your web application for known runtime\nvulnerabilities. 
It conducts live attacks against a review app and can be created for every\nmerge request as part of GitLab’s [CI/CD capabilities](/topics/ci-cd/). Users can provide HTTP\ncredentials to test private areas. Vulnerabilities are shown in-line with every\nmerge request.\n\n### Dependency scanning\n\n[Dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/index.html)\nanalyzes external dependencies (e.g. libraries like Ruby gems) for known\nvulnerabilities on each code commit with GitLab CI/CD. This scan relies on open\nsource tools and on the integration with [Gemnasium](https://docs.gitlab.com/ee/user/project/import/index.html)\ntechnology (now part of\nGitLab) to show, in-line with every merge request, vulnerable dependencies\nin need of updating. Results are collected and available as a single report.\nDependency scanning also provides a list of your project’s dependencies with\ndifferent versions for languages and package managers supported by Gemnasium.\n\n### Container scanning\n\nIf you’re using GitLab CI/CD, [container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html)\nwill let you check Docker images (and containers) for\nknown vulnerabilities in the application environment. Analyze image contents\nagainst public vulnerability databases using the open source tool, [Clair](https://coreos.com/clair/docs/latest/),\nthat\nis able to scan any kind of Docker (or app) image. Vulnerabilities are shown\nin-line with every merge request.\n\n### License management\n\nUpon code commit, project dependencies are reviewed for [approved and blacklisted\nlicenses](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html)\ndefined by custom policies per project. Software licenses are\nidentified if they are not within policy, and new licenses are also listed if\nthey require a status designation. 
This scan relies on an open source tool,\nLicenseFinder, and license analysis results are shown in-line for every merge\nrequest for immediate resolution.\n\n### Code quality analysis\n\nWith the help of GitLab CI/CD, you can analyze your source code quality using\nGitLab [Code Quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html).\nCode Quality uses [Code Climate Engines](https://codeclimate.com/)\nand runs in pipelines using a Docker image built into the Code Quality\nproject. Once the\nCode Quality job has completed, GitLab checks the generated report, compares the\nmetrics between the source and target branches, and shows the information\nwithin the merge request. With pipelines that enable concurrent testing and\nparallel execution, teams quickly receive insight about every commit, allowing\nthem to deliver higher quality code faster.\n\n### The Security Dashboard\n\nSecurity dashboards in GitLab exist at both the project and group level. The\ngroup dashboard provides an overview of all the security vulnerabilities in your\ngroups and projects. In the dashboard, developers are able to drill down into a\nvulnerability for further details, see which project it comes from and the file\nit’s in, and view various metadata to help analyze the risk.\n\nThe dashboard also allows viewers to\n[interact with vulnerabilities](https://docs.gitlab.com/ee/user/application_security/index.html#interacting-with-the-vulnerabilities)\nby creating an issue for them or dismissing them. For ease of use, vulnerabilities\nwithin the group Security Dashboard can be filtered by severity, confidence, report type, and project.\n\nIn addition to the vulnerability overview, the group Security Dashboard also\nprovides a timeline that displays how many open vulnerabilities your projects\nhad at various points in time. While security scans are automatically run for\neach code update, you’ll have some default branches that are infrequently\nupdated. 
To keep your Security Dashboard up to date on those branches, you can\nuse GitLab to [configure a scheduled pipeline](https://docs.gitlab.com/ee/ci/pipelines/schedules.html)\nto run a daily security scan.\n\n## What’s next for GitLab Secure?\n\nWhile we already have a number of ways to help you write secure code and build\nsecure products and services, we’re always looking for ways to give you more.\nHere are a few of the things we’re working on:\n\n### Interactive application security testing\n\nInteractive application security testing (IAST) checks the runtime behavior of applications by\ninstrumenting the code and\nchecking for error conditions. It is composed by an agent that lives inside the\napplication environment, and an external component, like DAST, that can interact\nand trigger unintended results.\n\n### Fuzzing\n\n[Fuzzing](/direction/secure/dynamic-analysis/fuzz-testing/)\nis a testing technique focused on finding flaws and vulnerabilities in\napplications by sending arbitrary payloads instead of valid input. The idea is to\ntrigger exceptions and unintended code paths that may lead to crashes and\nunauthorized operations. Once a possible problem – like a crash – is found,\nattackers can attempt to find the exact conditions needed to trigger the bug\nand see if they can be fine-tuned to obtain a useful result. (It is worth noting\nthat fuzzing is primarily intended for security teams because it requires more\ntime to execute. While fuzzing is a useful testing method, it should not be a\ndevelopment blocker).\n\n### Vulnerability database\n\nGitLab integrates access to proprietary and open source application security\nscanning tools. In order to maintain the efficacy of those scans, we strive to\nkeep their underlying vulnerability databases up to date.\n\n### Auto remediation\n\nVulnerabilities that require manual intervention to create a fix and push it to\nproduction have a time window where attackers have the ability to leverage the\nvulnerability. 
Auto remediation aims to automate the vulnerability solution flow and\nautomatically create a fix. The fix is then tested, and if it passes all the\ntests already defined for the application, it is deployed to production.\n\nPhoto by [Daniel McCullough](https://unsplash.com/@d_mccullough?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non [Unsplash](https://unsplash.com/search/photos/write?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[875,9,109,771,1158],{"slug":2264,"featured":6,"template":686},"developers-write-secure-code-gitlab","content:en-us:blog:developers-write-secure-code-gitlab.yml","Developers Write Secure Code Gitlab","en-us/blog/developers-write-secure-code-gitlab.yml","en-us/blog/developers-write-secure-code-gitlab",{"_path":2270,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2271,"content":2277,"config":2282,"_id":2284,"_type":14,"title":2285,"_source":16,"_file":2286,"_stem":2287,"_extension":19},"/en-us/blog/developing-a-successful-devops-strategy",{"title":2272,"description":2273,"ogTitle":2272,"ogDescription":2273,"noIndex":6,"ogImage":2274,"ogUrl":2275,"ogSiteName":670,"ogType":671,"canonicalUrls":2275,"schema":2276},"Developing a successful DevOps strategy","Here's what it takes to build a DevOps practice that works for everyone on the team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667540/Blog/Hero%20Images/devops-team-structure.jpg","https://about.gitlab.com/blog/developing-a-successful-devops-strategy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing a successful DevOps strategy\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-03-09\"\n      }",{"title":2272,"description":2273,"authors":2278,"heroImage":2274,"date":2279,"body":2280,"category":769,"tags":2281},[745],"2022-03-09","Some 60% of developers are 
releasing code 2x faster than before, [thanks to DevOps](https://learn.gitlab.com/c/2021-devsecops-report?x=u5RjB), and a majority of respondents to our 2021 Global DevSecOps Survey said their teams develop software using DevOps or DevSecOps.\n\n[DevOps](/topics/devops/) has had a direct impact on many businesses. Here’s what it takes to develop a successful DevOps strategy.\n\n## What is DevOps?\n\nDevOps is a set of practices that combines dev and ops to create safer software faster.\n\nThe main DevOps principles are automation, [continuous integration and delivery](/topics/ci-cd/) and responding quickly to feedback. Others are agile planning, infrastructure as code (IaC), containerization and microservices. Also, building in quality assurance and security with development and operations through the application lifecycle is important. Incorporating security into a DevOps team is referred to as [DevSecOps](https://about.gitlab.com/topics/devsecops/).\n\nEnabling the speed of delivery while maintaining high software quality requires an [organizational culture shift](https://www.ibm.com/cloud/learn/devops-a-complete-guide) that automates and integrates the efforts of the development and ops teams – two groups that traditionally practiced separately from each other, or in silos.\nBut the best DevOps processes and cultures extend beyond development and operations to incorporate input from all application stakeholders – including platform and infrastructure engineering, security, compliance, governance, risk management, line-of-business, end users and customers – into the software development lifecycle. \n\n## What are the benefits of a successful DevOps strategy?\n\nA successful DevOps strategy puts the focus on the customer. It’s not enough to focus on developing good software because this approach justifies prolonged development and release deadlines. It also overlooks the most critical factor: the consumer of the software. 
Your customer doesn’t care much about the process – they just want a quality product that will address their problem.  A successful DevOps strategy puts the team in the consumer’s shoes.\n\nAnother benefit of DevOps is that it allows a variety of teams, such as operations, security or project management, to work in an [Agile](/topics/agile-delivery/) setting. While development teams have become more Agile over the years, this occurred in isolation; operations teams have found it challenging to keep up and cannot release software at the same rate. DevOps brings these teams together and accelerates the delivery of software, while keeping the quality high.\n\nShorter development cycles with DevOps produce more frequent code releases, which in turn, makes it easier to spot code defects.\n\n## What key elements make DevOps successful?\n\nLike in most situations, **communication** is key to making a DevOps strategy successful. No business team can function without it, and that goes for a DevOps team. A good DevOps strategy incorporates feedback from developers, co-workers, and key stakeholders when building new systems.\nIT roles used to be more structured and defined, and as mentioned, professionals became used to working in silos. But DevOps has changed that model and work has become more **collaborative**. Teams now need to clearly communicate expectations, requirements and deadlines.\n\nDevOps is about a willingness to **change**. Teams must let go of some of their traditional practices and be open-minded to shifting their focus away from one deliverable and onto the next as business needs and capabilities evolve and change.\n\nTeams must also **accept failure** but not get discouraged by it. Some failure is to be expected, and the concept of [“fail fast”](https://docs.gitlab.com/ee/ci/testing/fail_fast_testing.html) (so you know there’s a problem soon enough to fix it easily) is at the heart of DevOps. 
They should embrace the possibilities that come from trying new techniques, and not be afraid to get creative. The top teams are those that work together, exchange ideas and push the boundaries of how they work and write more creative code.\n\n## Tips for creating a DevOps roadmap\n\nHaving a standard roadmap provides a DevOps team with a high-level, strategic blueprint of what the company envisions for the product. It’s a valuable reference point for any stakeholder during the software lifecycle. A roadmap also lets ops know when the development team will have a piece of code ready for testing.\n\nWhen creating a DevOps roadmap, make sure to clearly define the objectives and goals. Ask the team what [the collective purpose is for the roadmap](https://www.productplan.com/learn/create-a-devops-roadmap/). Objectives might include:\n\n- Improving engineering and ops teams coordination\n- Creating a single source of truth\n- Building an archive of development and release practices that people can refer to over time that are based on the most effective processes. This will help improve DevOps efforts going forward.\n\nFocused, short-term goals and plans should be established. Organizations typically plan their product roadmaps between 2 and 6 months out.\n\nA common mistake businesses make when building roadmaps is to use text only. By just using word processing documents or spreadsheets, stakeholders won’t get a clear understanding of what’s a high priority, which initiatives are dependent on others and who’s responsible for what.\n\nVisual roadmaps, complete with color-coding and bars, helps stakeholders more easily understand product plans. 
Roadmaps should also be kept current to reflect changes within the company’s culture and business model.\n\n## What are some common challenges associated with DevOps?\n\nChange isn’t easy and the merging of development and operations may cause a few clashes, but those involved must keep in mind that building a successful DevOps team requires this integration and collaboration between both sides. \nMake a gradual move into DevOps by starting with a small product or component and build from there.\n\nThere can also be challenges with deciding what tools to use, since there are so many available. This makes selecting a tool hard, especially if there’s a lack of knowledge about the technology behind it. Using a [DevOps platform](/topics/devops-platform/) can streamline all these choices as all of the moving parts of DevOps will be available and integrated in one single offering. \n\n[Momentum for DevOps](/blog/a-snapshot-of-modern-devops-practices-today/) is clearly growing because organizations are eager to take advantage of delivering software in shorter development cycles, while enhancing innovation in more stable operating environments and with performance-driven employee teams.",[9,749,855],{"slug":2283,"featured":6,"template":686},"developing-a-successful-devops-strategy","content:en-us:blog:developing-a-successful-devops-strategy.yml","Developing A Successful Devops Strategy","en-us/blog/developing-a-successful-devops-strategy.yml","en-us/blog/developing-a-successful-devops-strategy",{"_path":2289,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2290,"content":2295,"config":2300,"_id":2302,"_type":14,"title":2303,"_source":16,"_file":2304,"_stem":2305,"_extension":19},"/en-us/blog/devops-adoption",{"title":2291,"description":2292,"ogTitle":2291,"ogDescription":2292,"noIndex":6,"ogImage":805,"ogUrl":2293,"ogSiteName":670,"ogType":671,"canonicalUrls":2293,"schema":2294},"Understand how your teams adopt DevOps with DevOps reports","Learn about analytics, DevOps 
reports, DevOps scores, and more.","https://about.gitlab.com/blog/devops-adoption","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Understand how your teams adopt DevOps with DevOps reports\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2021-12-15\",\n      }",{"title":2291,"description":2292,"authors":2296,"heroImage":805,"date":2297,"body":2298,"category":769,"tags":2299},[2219],"2021-12-15","\n\nGitLab has an extraordinary range of features for a single application, providing an [entire DevOps platform](/stages-devops-lifecycle/) from [portfolio planning](/stages-devops-lifecycle/plan/) all the way through to [monitoring](/stages-devops-lifecycle/monitor/) and [service desk](https://docs.gitlab.com/ee/user/project/service_desk/). As such, GitLab is uniquely positioned to deliver a complete picture of your organization's DevOps journey and your return on investment in automation and DevOps practices.\n\nSome of the most interesting and difficult questions that organizations ask themselves are:\n\n* What do we gain from different development practices used by our teams?\n* What makes one team more efficient than another?\n* What practices have been successful in one team that we can introduce to others?\n\n## Analytics\n\nGitLab has several metrics to give you insight into the development lifecycle:\n\n* [Application Security](https://docs.gitlab.com/ee/user/application_security/security_dashboard/#project-security-dashboard) -  provides a comprehensive set of features for viewing and managing vulnerabilities.\n* [CI/CD](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html) - tracks the history of your pipeline successes and failures, as well as how long each pipeline ran.\n* [Code Review](https://docs.gitlab.com/ee/user/analytics/code_review_analytics.html) - displays open merge requests and their review 
time.\n* [Insights](https://docs.gitlab.com/ee/user/project/insights/index.html)- allows you to configure custom analytics that will be displayed.\n* [Issue](https://docs.gitlab.com/ee/user/group/issues_analytics/index.html) - illustrates the number of issues created each month.\n* [Merge Request](https://docs.gitlab.com/ee/user/analytics/merge_request_analytics.html) - displays information that will help you evaluate the efficiency and productivity of your merge request process.\n* [Repository](https://docs.gitlab.com/ee/user/analytics/repository_analytics.html) - displays information such as commit statistics, code coverage, and programming languages used in the repository.\n* [Value Stream Analytics](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html) - measures the time spent to go from an idea to production.\n\nSome analytics are only available for instance-level (self-managed), group level, or project level. Read [more](https://docs.gitlab.com/ee/user/analytics/) about analytics.\n\nThese analytics are a great way to see contributions from different projects and groups. On their own, however, they don't give insights into which processes your teams are using. For that, we offer DevOps Reports.\n\n## DevOps adoption reports\n\nDevOps Adoption is a DevOps Report located in group-level analytics. 
It shows you data for how teams in your organization use the most essential GitLab features.\n\nYou can use DevOps Adoption to:\n\n- Identify specific subgroups that are lagging in their adoption of GitLab features, so you can guide them on their DevOps journey.\n- Find subgroups that have successfully adopted certain features, and could provide guidance to other subgroups on how to use those features.\n- Verify if you are getting the return on investment that you expected from GitLab.\n\n![DevOps Adoption](https://about.gitlab.com/images/blogimages/devops_reports.png){: .shadow}\n\nIn this example, we can see some interesting data on how a team uses features in development, security, and operations categories:\n\n* **Development**\n  * Approvals: At least one merge request approval on a merge request.\n  * Code owners: At least 1 defined code owner that owns a specific file or repository in the group.\n  * Issues: At least 1 issue opened in this group.\n  * Merge requests: At least 1 merge request opened in this group.\n* **Security**\n  * DAST:  At least 1 DAST scan run in a pipeline in the group.\n  * Dependency Scanning: At least 1 dependency scan ran in a pipeline in the group.\n  * Fuzz Testing: At least 1 fuzz testing scan ran in a pipeline in the group.\n  * SAST: At least 1 SAST scan ran in a pipeline in the group.\n* **Operations**\n  * Deployments: At least 1 deployment.\n  * Pipelines: At least 1 pipeline ran successfully.\n  * Runners: At least 1 runner configured for the project or group.\n\nIn the future we plan to add even more feature categories to DevOps Reports, such as:\n* [Environments](https://docs.gitlab.com/ee/ci/environments/#environments-and-deployments)\n* [Pages](https://docs.gitlab.com/ee/user/project/pages/)\n* [Compliance Pipelines](https://docs.gitlab.com/ee/user/project/settings/index.html#compliance-pipeline-configuration)\n* [Incidents](https://docs.gitlab.com/ee/operations/incident_management/incidents.html)\n* [Review 
Apps](https://docs.gitlab.com/ee/ci/review_apps/#review-apps)\n\n...and much more. You can follow our future plans in the following [epic](https://gitlab.com/groups/gitlab-org/-/epics/5019).\n\n_DevOps Reports are available for the Ultimate tier for self-managed and SaaS users. To find DevOps Reports, go to your group and in the left sidebar, select Analytics > DevOps adoption_\n\n## DevOps Score\n\nYou can use the DevOps score to compare your DevOps status to other organizations.\n\nThe DevOps Score tab shows usage of major GitLab features on your instance over the last 30 days. GitLab calculates the averages feature usage based on the number of billable users in that time period. You can also see the Leader usage score, calculated from top-performing instances based on Service Ping data that GitLab collects. GitLab compares your score to the lead score of each feature and shows it as a percentage underneath the feature. Your overall DevOps Score is an average of your feature scores.\n\nTo analyze your DevOps Score, GitLab aggregates Service Ping (sometimes referred to as Usage Ping) data on GitLab servers for analysis. Your usage information is not sent to any other GitLab instances. If you have just started using GitLab, it may take a few weeks for GitLab to collect enough data to calculate your DevOps Score.\n\n![DevOps Score](https://about.gitlab.com/images/blogimages/dev_ops_score_v12_6.png){: .shadow}\n\n_DevOps score is available at the admin panel for all tiers under Analytics > DevOps Reports._\n\nTo see the DevOps score, you must activate your GitLab instance’s [Service Ping](https://docs.gitlab.com/ee/administration/settings/usage_statistics.html#service-ping). 
This is because DevOps Score is a comparative tool, so your score data must first be centrally processed by GitLab, Inc.\n\nThere are several benefits of enabling Service Ping, such as DevOps Score and cohorts:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ZhLrhZlb_zI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Cohorts\n\nCohorts shows your teams' GitLab activities over time, and is a useful tool for administrators to view user retention and manage seats in their GitLab instance.\n\n![Cohorts](https://about.gitlab.com/images/blogimages/cohorts_v13_9_a.png){: .shadow}\n\nUsers are considered active if they have performed at least one of the following activities:\n\n* Sign in to GitLab.\n* Perform a Git activity such as push or pull.\n* Visit pages related to dashboards, projects, issues, or merge requests.\n* Use the API.\n* Use the GraphQL API.\n\nCover image credit:\n\nCover image by [John Schnobrich](https://unsplash.com/photos/FlPc9_VocJ4) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,916,978],{"slug":2301,"featured":6,"template":686},"devops-adoption","content:en-us:blog:devops-adoption.yml","Devops Adoption","en-us/blog/devops-adoption.yml","en-us/blog/devops-adoption",{"_path":2307,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2308,"content":2314,"config":2320,"_id":2322,"_type":14,"title":2323,"_source":16,"_file":2324,"_stem":2325,"_extension":19},"/en-us/blog/devops-and-the-scientific-process-a-perfect-pairing",{"title":2309,"description":2310,"ogTitle":2309,"ogDescription":2310,"noIndex":6,"ogImage":2311,"ogUrl":2312,"ogSiteName":670,"ogType":671,"canonicalUrls":2312,"schema":2313},"DevOps and the scientific process: A perfect pairing","Research teams have taken to DevOps principles and practices. 
Find out why and how to adopt DevOps in your organization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668378/Blog/Hero%20Images/hans-reniers-lQGJCMY5qcM-unsplash.jpg","https://about.gitlab.com/blog/devops-and-the-scientific-process-a-perfect-pairing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps and the scientific process: A perfect pairing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"}],\n        \"datePublished\": \"2022-02-15\",\n      }",{"title":2309,"description":2310,"authors":2315,"heroImage":2311,"date":2317,"body":2318,"category":769,"tags":2319},[2316],"Christina Hupy, Ph.D.","2022-02-15","\nThe scientific process and the DevOps lifecycle. At first glance, it’s hard to imagine a connection. Yet, if you look at how some of GitLab’s customers and community members are marrying the two, it makes perfect sense.\n\nTake, for example, the European Space Agency (ESA), which uses GitLab extensively for a variety of purposes, including version control, enabling collaboration, increasing security, and coordinating the intellectual resources of its 22 member states. ESA  has more than 140 groups and 1,500 projects stored on its GitLab instance. In the first year of using the DevOps Platform, ESA ran more than 60,000 pipeline jobs, allowing the organization to deploy code faster and to simplify its toolchain. The projects range from mission control systems, onboard software for spacecraft, image processing, and monitoring tools for lLabs. The ESA IT Department also uses GitLab to host its code tools and configurations infrastructure. Since adopting GitLab, ESA has enjoyed a culture of collaboration that is increasing around the organization.\n\nAs you can see with the ESA example, the connection between research and DevOps is powerful. 
Let’s examine why this combination works so well.\n\nThe scientific process moves through stages: asking a question, conducting background research, constructing a hypothesis, testing your hypothesis by doing an experiment, analyzing data, and reporting results. This process is very often iterative as new information is discovered throughout. It also is very collaborative as researchers work together to formulate hypotheses, gather data, and analyze the data.  Many artifacts are generated throughout the process, including data, analysis scripts, results, and research papers. Often, software itself is built to run equipment, labs, or process data.\n\nDevOps, the set of practices and tools that combines software development and information technology operations, also moves through stages. [These stages](/stages-devops-lifecycle/) include manage,  plan, create, verify, and release. DevOps is also very iterative and collaborative and many different types of artifacts are generated along the way.\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/devopsinscience1.png)\n\n## How the scientific process and the DevOps lifecycle align\n\nWe aren’t the only ones who noticed the similarities! As researchers were looking for tools to help them organize their plans, data, scripts, and results in a way that allowed them to work collaboratively and efficiently, they started using source control management. Storing their artifacts in a central repository had immediate benefits for collaboration.  It was a natural progression from there expanding across the DevOps lifecycle. As the shift happened and scientists began using the DevOps lifecycle for the scientific process, the results were transformational. 
Shifting the approach of science to follow the DevOps lifecycle resulted in increased transparency, collaboration, reproducibility,  speed to results, and data integrity.\n\nIn this transformation, the first stages of the scientific method – observing and hypothesizing – equate to the DevOps plan stage.  Hypotheses and research tasks can be managed and documented in issue tracking systems. Issues define what work needs to be done and progress can be tracked with milestones and labels. No information is lost in separate email threads or local documents. Assigning issues to users, along with approver and reviewer features, can make the research process highly efficient among collaborators, graduate students, and mentors.\n\nData collected during the testing stage is stored in a central repository where source control management (SCM) keeps them safe and accessible.  [Git technology](/topics/version-control/what-is-centralized-version-control-system/) allows all changes to be controlled, tagged, versioned with branches, and peer-reviewed through merge requests.  Analysis scripts are also stored in [source code management](/solutions/source-code-management/) as well and run using [continuous integration](/solutions/continuous-integration/)(CI), a.k.a. the verify stage. Containerization is used to replicate computing environments and ensure reproducible results.\n\n## The role of documentation\n\nDevOps platforms are able to transform the scientific research process because the whole research lifecycle can be documented with a single source of truth in a repository,  open, shared, and accessed. Where, currently, only final results are reviewed and published in the form of papers, leaving the rest of the process mostly opaque to reviewers and the public, the DevOps workflow allows access to and collaboration on all stages of the scientific lifecycle. As this one repository hosts all stages of the scientific process, metrics can be generated on all contributions. 
Researchers around the world can use the same containers, environment, and analysis on their own data ensuring reproducible science.\n\n## Breaking down research silos\n\nMost research today is happening sequentially, with locally optimized research groups working in silos. We often see duplication of work, incomplete documentation of results, and intransparent data and analysis. The DevOps transformation is shifting science to concurrent science where researchers are working collaboratively, with full transparency for reviewers.\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/devopsinscience2.png)\n\n## Examples of the Research-DevOps alliance\n\nLet’s take a look at some examples, in addition to ESA mentioned at the outset. Researchers at MathWorks use DevOps tools workflows to perform requirements-based testing on an automotive lane-following system with Model-Based Design, as mentioned in this article [“Continuous Integration for Verification of Simulink Models”](https://www.mathworks.com/company/newsletters/articles/continuous-integration-for-verification-of-simulink-models-using-gitlab.html).\n\nData and code are stored in an SCM and then  are forked to a testing branch.  CI pipelines are used to run various experiements and  tests on the code. When a test-case failure is detected in a GitLab CI pipeline, the researchers create an Issue to track and discuss the bugfix. The bug is reproduced locally in MATLAB, the issue is fixed, and the tests are run locally. The changes are reviewed on the testing branch. These changes can be committed to the testing branch where the verify, test, and build process is repeated. Researchers then create a merge equest to send the changes of the test branch into the master branch and close the corresponding Issue.\n\nAccording to the authors, “CI is gaining in popularity and becoming an integral part of Model-Based Design”.  
The benefits of using CI cited by the researchers include: repeatability, quality assurance, reduced development time, improved collaboration, and audit-ready code.\n\nThe Square Kilometre Array Organisation (SKAO) is leading the design of the globally distributed radio telescope SKA, using GitLab SCM and CI for scientific collaboration, development efficiency, and transparency. According to Lead Software Architect Marco Bartolini, “The large success is having been able to onboard code and software projects from many different organizations and with very different tools and technology into one single platform, easily. It was not a pain, and now we got it all under control. So that's brilliant.”\n\nThe sky is the limit for how DevOps is transforming the scientific research process – perhaps it could transform yours.  Vist [GitLab for Education Program](/solutions/education/) to learn more and watch our “GitLab for Scientific Research” video below.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4PRFhDIV_4Q\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [Hans Reiners](https://unsplash.com/photos/lQGJCMY5qcM) on [Unsplash](https://unsplash.com/)\n",[9,267,793],{"slug":2321,"featured":6,"template":686},"devops-and-the-scientific-process-a-perfect-pairing","content:en-us:blog:devops-and-the-scientific-process-a-perfect-pairing.yml","Devops And The Scientific Process A Perfect 
Pairing","en-us/blog/devops-and-the-scientific-process-a-perfect-pairing.yml","en-us/blog/devops-and-the-scientific-process-a-perfect-pairing",{"_path":2327,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2328,"content":2333,"config":2340,"_id":2342,"_type":14,"title":2343,"_source":16,"_file":2344,"_stem":2345,"_extension":19},"/en-us/blog/devops-at-nova-scotia-province",{"title":2329,"description":2330,"ogTitle":2329,"ogDescription":2330,"noIndex":6,"ogImage":1393,"ogUrl":2331,"ogSiteName":670,"ogType":671,"canonicalUrls":2331,"schema":2332},"How we introduced DevOps at the province of Nova Scotia","The Linux Ops team and one of the Development teams at the Government of Nova Scotia introduced DevOps practices to their workflow – find out how they did it and what benefits they're now enjoying.","https://about.gitlab.com/blog/devops-at-nova-scotia-province","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we introduced DevOps at the province of Nova Scotia\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steven Zinck\"},{\"@type\":\"Person\",\"name\":\"Paul Badcock\"}],\n        \"datePublished\": \"2017-08-14\",\n      }",{"title":2329,"description":2330,"authors":2334,"heroImage":1393,"date":2337,"body":2338,"category":679,"tags":2339},[2335,2336],"Steven Zinck","Paul Badcock","2017-08-14","\n\nDevOps is the practice of breaking down silos between Development and Operations teams. DevOps promotes a culture and practices where Dev and Ops teams have open communication and collaboration. This article explains how the Linux Ops team and one of the Development teams at the Government of Nova Scotia were able to implement DevOps practices and realize its benefits.\n\n\u003C!-- more -->\n\n## The beginning\n\nThe Linux Ops team was asked to host a Ruby application built circa 2006. 
We’re a Red Hat Enterprise Linux shop, provisioning the newest release of RHEL 7 and the Ruby app required gems that are only compatible with RHEL 6 and older. So, we had two options - provision a new RHEL 6 VM - something we haven’t done in over a year, or take this opportunity to containerize the application and use it as a proof of concept. Although we’ve been using containers for over two years in our [Puppet CI](https://medium.com/@szinck/how-we-use-gitlab-at-the-province-of-nova-scotia-708b514cc47f) environment, and have containerized some of our own management apps, this was our first client application to containerize.\n\nYou can also learn more about our DevOps transformation by watching our recent interview:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/SHdeqznJXbc\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\n## Ops digs into the application\n\nSince the Ruby code for the application was already in our GitLab, we had easy access to it so we could begin to understand its functionality. How does authentication work? How does SSL work? Where are assets stored? Exactly which gems are required? Does the system send email, and if so, how?\nAs we started to peek into the application, we found the answers to all of these things and a lot more. We were able to make a couple improvements to the application, for example, we were able to remove hard-coded values and switch to variables. In particular, we were able to expose database connection strings which can be leveraged by Docker Swarm.\n\n## The Docker image\n\nNow that we had a better understanding of how the application works, we started working on the container to host the application. We started with a base image of RHEL 6.9 and began layering on the dependencies and the application itself. 
Since the Development team is naturally very knowledgeable about their application, we collaborated closely with them on this process.\n\n## Automatically building and deploying\n\nOnce we had an image we were happy with, it was time to configure [Docker Swarm](https://docs.docker.com/engine/swarm/) and configure GitLab CI to push the image to our Docker registry.\n\nI’ve included the relevant piece of our CI configuration below. As you can see, we’re tagging the Docker image with the last commit # and pushing it to our internal registry.\n\n```build_image:\n  image: docker:1.12\n  stage: build\n  script:\n    - docker build -t\n    ${DOCKER_REGISTRY}/${NAMESPACE}/${CI_PROJECT_NAME}:${CI_COMMIT_SHA}\n    - docker push  \n    ${DOCKER_REGISTRY}/${NAMESPACE}/${CI_PROJECT_NAME}:${CI_COMMIT_SHA}\n```\n\nNow that the image is up on our registry, we can tell Docker Swarm that a new image is available. Swarm will automatically pull down the new image and reload the application with less than five seconds of downtime.\n\n```\nDOCKER_HOST=\"${DOCKER_DEV_HOST}\" docker service update --image  \n${DOCKER_REGISTRY}/${NAMESPACE}/${CI_PROJECT_NAME}:${CI_COMMIT_SHA}  \n${CI_PROJECT_NAME}_app_1\n```\n\n## Automating security scanning (DevSecOps!)\n\nIn addition to building the image, we also run a battery of security tests against the application code, the operating system, and application in its running state.\n\n![pipeline](https://about.gitlab.com/images/blogimages/devops-nova-scotia-screengrab.png){: .shadow}\u003Cbr>\n\nAs you can see from the pipeline, after the image is built, we run a static code analysis using [Brakeman](http://brakemanscanner.org/). Brakeman tests the code for security issues, and since it’s a code analysis tool, the application doesn’t need to be running. After the code scan, we run [Red Hat’s atomic scanner](https://developers.redhat.com/blog/introducing-atomic-scan-container-vulnerability-detection/) against the image. 
This tool will notify us of any known security issues in the operating system. Finally, we can deploy the application and then run [Arachni](http://www.arachni-scanner.com/) to test the application in its running state.\n\n## Benefits of DevOps\n\nWe’ve discovered several benefits from this approach:\n\n- The Ops and Dev teams worked closely together, each learning about the other's domain expertise. As Ops discovered issues with the application, we were able to make code changes that were peer-reviewed by the Dev team using the [Git Flow](https://datasift.github.io/gitflow/IntroducingGitFlow.html) development model.\n- The time to delivery for the application has improved drastically, and a framework has been established that existing, new and third-party staff can all leverage.\n- Lower failure ratec - if a new vulnerability is introduced into the stack, we’ll know.\n- Fixes can be applied on demand by Dev without Ops involvement.\n- Recovery of the application is now as simple as two clicks.\n- Dev and Ops both understand how the application functions and have a blueprint of its architecture in the Docker configuration.\n\n## Next steps\n\nWe’re actively collaborating with other Development teams across government to implement DevOps-style practices. From a technology perspective, we’re aggressively working towards improving our technology stack so that we can improve business value for our customers.\n\nThis post originally appeared on [*Medium*](https://medium.com/@szinck/devops-at-the-province-of-nova-scotia-42688759a25d).\n\n### About the Guest Authors\n\n[Steve Zinck](https://www.linkedin.com/in/stevezinck/) spent most of his career working in the Public Service as a Unix and Infrastructure administrator. Over the past few years, he's started to transition away from traditional systems administration and begun to focus on software delivery and automation. 
As part of that transition, his team has implemented GitLab at the core of our automation and software delivery stack. His current focus is working with software and application teams to assist in streamlining their deployment and delivery process.\n\n[Paul Badcock](https://www.linkedin.com/in/pbadcock/?ppe=1) started working in the IT sector in 1998 with positions in small startups, to large Fortune 500 companies, to currently on a public-sector team. His career was focused as a traditional IT Linux administrator until in the mid-2000s he started focusing on adopting development tooling, practices and methodologies for operational teams. This work culminated in implementing an early 2010s DevOps workplace framework with the help of @stewbawka and subsequently working with like-minded teams since. As a part of adopting developer tools he has previously worked with and managed CVS, SVN installations and various vendor products before reading a “Show HN” posting on Hacker News about GitLab.\n",[9,683],{"slug":2341,"featured":6,"template":686},"devops-at-nova-scotia-province","content:en-us:blog:devops-at-nova-scotia-province.yml","Devops At Nova Scotia Province","en-us/blog/devops-at-nova-scotia-province.yml","en-us/blog/devops-at-nova-scotia-province",{"_path":2347,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2348,"content":2354,"config":2359,"_id":2361,"_type":14,"title":2362,"_source":16,"_file":2363,"_stem":2364,"_extension":19},"/en-us/blog/devops-in-education-2021-survey-results",{"title":2349,"description":2350,"ogTitle":2349,"ogDescription":2350,"noIndex":6,"ogImage":2351,"ogUrl":2352,"ogSiteName":670,"ogType":671,"canonicalUrls":2352,"schema":2353},"DevOps in Education 2021 Survey results","DevOps and GitLab are helping transform higher education. 
Here's what we learned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668253/Blog/Hero%20Images/pencil2.jpg","https://about.gitlab.com/blog/devops-in-education-2021-survey-results","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps in Education 2021 Survey results\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"}],\n        \"datePublished\": \"2022-05-04\",\n      }",{"title":2349,"description":2350,"authors":2355,"heroImage":2351,"date":2356,"body":2357,"category":769,"tags":2358},[2316],"2022-05-04","\n\nIn fall 2021 we launched our second annual DevOps in Education Survey. Over 460 respondents from all regions of the world shared insights on how DevOps and GitLab are transforming higher education. \n\n## Key findings \n\n- **One platform for the win**: Respondents' enthusiasm for teaching GitLab's single DevOps platform increased 190% over 2020; survey takers also pointed to the way GitLab can tie culture to operations as key (up 189% year over year), and they also value student portfolio management (up 200%). \n\n- **CI/CD success**: Academic institutions reported high rates of adoption of GitLab’s CI/CD features both within the classroom and in all other use cases. \n\n- **Flexibility is key**: Deployment flexibility stands out again as a major advantage of GitLab at institutions of higher education. Security and authentication are the primary drivers. \n\n- **GitLab spreads the DevOps love**: Multiple departments within an academic institution are reporting they’re now using GitLab and 21% of respondents said the ability to install multiple instances across a campus was a GitLab advantage (up 6% from 2020).\n\n- **…and more spread = branching out**: Because GitLab has one complete platform, higher ed. respondents report they’re expanding their DevOps footprint to include additional stages like Secure. 
The three most used stages in education continue to be Source Control Management, Plan, and Verify. Release and Package are also seeing nearly 30% adoption by respondents. \n\n- **Planning features**: Educators find planning features such as multi-level epics, issue tracking features, labels, and project management highly useful tools. \n\n## Why DevOps belongs in the classroom\n\nThe benefits of teaching or learning GitLab came through clearly in the survey. The fact that GitLab is a single DevOps tool was key for 58% of respondents, up from just 20% in 2020. \n\nWhat are the benefits of teaching or learning GitLab?\n\n![Chart of the benefits of teaching or learning GitLab](https://about.gitlab.com/images/blogimages/gleducation2021.png)\n\n## How GitLab in education works\n\nDeployment flexibility is critical to universities because security and server access can be controlled (81%), all while integrating with user authentication systems (54%). The ability to host multiple instances per institution was also a factor for 21% of respondents, up 6% from last year – another sign that cross-campus adoption is growing.\n\nAdvanced features (only available in the Ultimate tier) are used by 35% of respondents, which remained fairly consistent from 2020. Security features including container scanning, SAST, advanced security testing, custom DAST, and compliance management were among the most frequently mentioned. Multi-level epics and free guest users were commonly mentioned as well. \n\n## Use cases and DevOps stages\n\nThe most common use of GitLab in education was source control management with 53% of respondents actively using, followed by Verify (Continuous Integration) at 40%, Plan (issue tracking, labels) 38%,  Manage (authentication, compliance management) at 28%, Package 29% and Release (Continuous Delivery) at 29%. The top four tools other than GitLab used by respondents were GitHub (76%), GitHub Actions (24%), Jenkins (26%), and BitBucket (17%). 
\n\nFaculty respondents noted the value of bringing industry tools to the classroom. One wrote, “Thank you for the GitLab Program. It makes it possible for us to manage students' software engineering projects in a modern development environment.”\n\n## Leveraging GitLab to boost skills\n\nThe 2021 survey asked an additional question regarding what specific skills are being taught with GitLab in the classroom. The three top skills taught with GitLab are: CI/CD (40%), collaboration and communication (36%), application development and design (30%). Other key skills included understanding process flows and analytics, modern computer technology and architectures, and system architectures. \n\n## About the participants\n\nOf the respondents, 35.9% have and use a GitLab subscription while 37% do not. The majority of respondents (78%) were at a university. There were 50 departments listed in the results; 40% were in a Computer Science Department and 32% in Information Technology. Of those respondents using GitLab, 23 departments were represented. These departments ranged across the academic disciplines including biology, economics, physics, business, and engineering. Respondents were 46% faculty and staff, 41% students, and 7% Administrators. We had a range of respondents from around the world: 39% were from North America, 28% from Europe, 18% from Asia, and 9% from South America. \n\n## GitLab for Education\n\nWe believe that *everyone can contribute*. We are committed to bringing DevOps to education institutions around the world. We provide free, unlimited, top-tier licenses to qualifying educational institutions for teaching, learning, and research. [Learn more here](/solutions/education/). 
\nAnd see all the results from the [GitLab for Education 2020 Program Survey report](/solutions/education/edu-survey/edu-survey-2020.pdf).\n",[9,1339,267],{"slug":2360,"featured":6,"template":686},"devops-in-education-2021-survey-results","content:en-us:blog:devops-in-education-2021-survey-results.yml","Devops In Education 2021 Survey Results","en-us/blog/devops-in-education-2021-survey-results.yml","en-us/blog/devops-in-education-2021-survey-results",{"_path":2366,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2367,"content":2373,"config":2378,"_id":2380,"_type":14,"title":2381,"_source":16,"_file":2382,"_stem":2383,"_extension":19},"/en-us/blog/devops-is-at-the-center-of-gitlab",{"title":2368,"description":2369,"ogTitle":2368,"ogDescription":2369,"noIndex":6,"ogImage":2370,"ogUrl":2371,"ogSiteName":670,"ogType":671,"canonicalUrls":2371,"schema":2372},"DevOps is at the center of GitLab","GitLab allows companies to do away with the many point solutions that have been digitally duct taped together and instead bring all DevOps functionalities together in ONE place","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683273/Blog/Hero%20Images/Apr_27_Blog_Post_Image_2_-_light.png","https://about.gitlab.com/blog/devops-is-at-the-center-of-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps is at the center of GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2022-04-27\",\n      }",{"title":2368,"description":2369,"authors":2374,"heroImage":2370,"date":2375,"body":2376,"category":726,"tags":2377},[1609],"2022-04-27","Accelerating DevOps adoption is core to achieving our mission of allowing everyone to contribute. DevOps enables contribution and collaboration between disparate and previously siloed teams. 
In fact, DevOps is so central to GitLab that we have incorporated the DevOps infinity loop into our logo. I’m excited to share our new logo and look with you.\n\n## Building the One DevOps Platform\n\nDevOps has come a long way since GitLab was incorporated in 2014. And DevOps strategies are continuing to evolve. For some companies, each team selects their own DevOps tools, which causes problems when teams try to collaborate. For other companies, they select a set of preferred tools. But then they still require a lot of custom work to integrate DevOps point solutions together into a “Do It Yourself DevOps” solution. The more point solutions that are digitally duct taped together, the harder it is to integrate and maintain them all. \n\nAnd that’s why I’m proud that GitLab allows companies to do away with the many point solutions that have been digitally duct taped together and instead bring all DevOps functionalities together in ONE place. \n\nAs someone who is passionate about single sources of truth for information, the concept of One resonates with me. There are many ways in which GitLab as The One DevOps Platform helps customers evolve their DevOps landscape and deliver better results for their organizations.\n\n- One interface\n- One data model\n- One permissions model\n- One value stream\n- One set of reports\n- One spot to secure your code \n- One location to deploy to any cloud \n- One place for everyone to contribute\n\nOne. Platform.\n\nToday, all companies live and die on their ability to create and deliver software. This is true for every type of organization, from the largest global commercial enterprises to the emerging hypergrowth startups.  That is why companies such as Siemens, T-Mobile, and UBS, have selected GitLab as their DevOps platform.\n\n> \"Having the ability to fully develop software in the cloud through GitLab is a game changer, allowing us to accelerate our tech strategy and offer a best-in-class engineering experience. 
It also means we're able to constantly develop, test and deploy technical solutions while they are running, improving time-to-market for our clients while decreasing costs.\" - Mike Dargan, Chief Digital and Information Officer at UBS.\n\n## Evolving the GitLab brand, iterating our logo and look\n\nIteration is deeply ingrained in our values. We strive to do the smallest thing possible to get to the best result as quickly as possible. This value leads to quicker learning and tighter feedback loops.\n\nI see this moment both as a symbol of GitLab’s growth and of the evolution of DevOps itself. To reinforce this moment, we are also evolving our logo. The new logo places GitLab at the center of the DevOps infinity loop. I am pleased that we chose to iterate instead of a step change – staying true to our values. \n\n![Animation of GitLab logo](https://about.gitlab.com/images/blogimages/GitLab-Logo-Animation-onWhite-500x300.gif){: .shadow}  \n\n## And we’re just getting started\n\nI aspire for GitLab to represent a place where we elevate others through knowledge access, job access, and The One DevOps platform. More to come later this year as we work to help individuals elevate their careers by learning core DevOps principles and how to use GitLab.\n\nThank you to our amazing customers for choosing GitLab as your One DevOps Platform. Most importantly, thank you for believing in a mission where everyone can contribute and live the GitLab values every day.\n\n_GitLab releases new features on the 22nd of each month. We invite you to the [GitLab 15 release event](https://page.gitlab.com/fifteen) to experience exciting new elements of The One DevOps Platform. 
Join us._",[726,9],{"slug":2379,"featured":6,"template":686},"devops-is-at-the-center-of-gitlab","content:en-us:blog:devops-is-at-the-center-of-gitlab.yml","Devops Is At The Center Of Gitlab","en-us/blog/devops-is-at-the-center-of-gitlab.yml","en-us/blog/devops-is-at-the-center-of-gitlab",{"_path":2385,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2386,"content":2392,"config":2398,"_id":2400,"_type":14,"title":2401,"_source":16,"_file":2402,"_stem":2403,"_extension":19},"/en-us/blog/devops-predictions-gitlab-experts-weigh-in-on-ai-security-remote-work-and-more",{"title":2387,"description":2388,"ogTitle":2387,"ogDescription":2388,"noIndex":6,"ogImage":2389,"ogUrl":2390,"ogSiteName":670,"ogType":671,"canonicalUrls":2390,"schema":2391},"2022 DevOps predictions: AI, security, remote work & more","Want to see into the DevOps future? We’ve got insights to share, including the challenges for AI/ML and the impact of cloud-native on DevSecOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683162/Blog/Hero%20Images/tomasz-frankowski-kbufvkbfioe-unsplash.jpg","https://about.gitlab.com/blog/devops-predictions-gitlab-experts-weigh-in-on-ai-security-remote-work-and-more","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"2022 DevOps predictions: GitLab experts weigh in on AI, security, remote   work, and more\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-12-06\",\n      }",{"title":2393,"description":2388,"authors":2394,"heroImage":2389,"date":2395,"body":2396,"category":679,"tags":2397},"2022 DevOps predictions: GitLab experts weigh in on AI, security, remote   work, and more",[745],"2021-12-06","2022 is set to be a big year for [DevOps](/topics/devops/), especially when it comes to integrating AI and machine learning, pushing security further left in the development cycle, and expanding opportunities for open 
source and remote work. We’ve gathered eight predictions from the top minds here at GitLab about the DevOps platform and the DevOps industry overall.\n\n## 1. AI/ML adoption will increase and will be instrumental in addressing supply chain issues and labor shortages.\n\n[Taylor McCaslin](https://gitlab.com/tmccaslin), Group Manager, Product - ModelOps & Anti-Abuse, says:\n\n“We’re going to see increased adoption of [AI/ML](/direction/modelops/ai_assisted/) across all industries. With the labor and supply chain shortages and dramatic shifts in climate-related events, companies globally are having to learn to do more with less in even more dynamic environments. AI/ML is well-suited to solve some of these complex problems in industries we may not have expected [adoption from] this early.\n\nWe have started seeing governments embrace AI/ML technologies. When you think about it, governments are by definition inefficient, but they hold a lot of data that’s ripe territory for AI/ML to make an impact. Take the Internal Revenue Service in the U.S., for example. ML applied to process paper tax returns or to look for anomalies could reduce costs and increase revenue from catching tax fraud and data entry mistakes. Also, with Covid-19 not looking like it will go away anytime soon, there are huge data problems that are well suited for AI/ML in tracking and proving vaccination status. The list for AI/ML is endless.\n\nAI/ML still is a specialty field. So businesses need to have clear use cases for hiring data science teams and setting them up for success to deploy models into production. We still see friction between traditional DevOps technologies and new data science platforms slowing time to value and increasing the cost of developing AI/ML technologies, but those problems are becoming more understood and we’ll see that gap shorten over time reducing cost and complexities.”\n\n## 2. 
Businesses will continue to integrate security more tightly into DevOps and create DevSecOps teams to reduce risk, speed deployment, and gain a competitive advantage.\n\n[Johnathan Hunt](https://gitlab.com/JohnathanHunt), Vice President of Security, says:\n\n“The [DevSecOps](/blog/gitlab-is-setting-standard-for-devsecops/) practice will continue to increase in 2022 as more organizations understand the efficiencies and improved security of this strategy. Further, those that are currently leveraging DevSecOps as part of their development practice are realizing the benefits with fewer vulnerabilities, faster deployments, less time spent in corrective actions, and an overall reduction of risk. Ultimately, this will provide companies with a differentiated approach, leading to competitive advantages in their space.\n\nDevSecOps is important to prioritize due to the increased threat landscape that remote work models introduce. It is imperative that companies focus on transformative ways to protect their product and data to effectively manage their overall risk posture. DevSecOps is a proven strategy that reduces risk and security incidents while allowing faster and more secure code deployments.”\n\n## 3. Two of the biggest buzzwords of 2021 will take divergent paths next year: Kubernetes will play a fundamental role in DevSecOps, while zero trust will see only moderate gains.\n\nHunt says:\n\n“DevOps users have come to realize the benefits of operating security controls natively within Kubernetes rather than separate tools and separate teams adding steps to the process. This is a fundamental component to furthering the DevSecOps story. 
Additionally, the [Kubernetes](/blog/gitlab-kubernetes-agent-on-gitlab-com/) platform is continuing to evolve and adapt to the need for greater control and automation within reach of DevOps users leading to the natural and highly advantageous shift left strategy.\n\nMeantime, although we are seeing an increase in the implementation of certain zero trust principles, overall the industry has been slow to respond. Much of this is due to the understanding, complexity, and difficulty of implementing full zero-trust models within the tech stack. I predict 2022 will, at best, see a moderate gain in the adoption of [zero trust](/blog/questions-regarding-our-zero-trust-efforts/).”\n\n## 4. Secure software supply chain will become a standard element of security strategy for government organizations.\n\n[Bob Stevens](https://gitlab.com/bstevens1), Area Vice President of Public Sector, says:\n\n“Federal agencies are starting to tackle software supply chain security, spurred by guidance from NIST and actions outlined in Executive Orders issued in early 2021. While these guidelines are critical to success, agencies will rise to the challenge of implementing new security measures instead of waiting to act. Regardless of the publication of final guidance, CIOs will implement actions for software supply chain security to proactively defend their agencies. CIOs know that enhancing cyber defenses immediately is crucial to outsmarting adversaries, and they will not delay in enacting change. Once guidelines are final, CIOs will adjust their policies to meet best practices.\n\nTo ensure security in the software supply chain, people, processes, and technologies need to work together in unison. This includes code that has been examined by numerous security personnel, build processes that take place in the open, and high-quality software that is tested and trusted. 
Software factories and contractors that work with them will also need to put in place a comprehensive and continuously monitored software bill of materials (SBOM), allowing everyone touching the software to fully understand the dependencies and vulnerabilities of their ecosystems.\n\nA DevOps platform can address many important security considerations. With security scanners built into the development process, agencies can scan every line of code as it is committed, allowing developers to identify and remediate vulnerabilities before they are pushed.“\n\n## 5. Cloud adoption will extend to other parts of the development life cycle, including developers’ own environments. \n\n[Brendan O’Leary](https://gitlab.com/brendan), Staff Developer Evangelist, says:\n\n“I still see a lot of enterprises or individual teams that find themselves at [various phases of DevOps](/blog/welcome-to-the-devops-platform-era/). So I believe that 2022 will bring a shift towards platforms - either through DIY or adoption of a DevOps platform. We’ll see more adoption of cloud technologies for other parts of the development lifecycle as well, such as developers’ own environments.”\n\n## 6. Open source will grow beyond a common software development practice to a full business model embraced by organizations.\n\n[Cesar Saavedra](https://gitlab.com/csaavedra1), Technical Marketing Manager, says:\n\n“Open source growth will continue in the future, and not just as a way to develop software but also as a business model. Not only have companies realized the need to be [digital leaders](https://www.capgemini.com/wp-content/uploads/2017/07/The_Digital_Advantage__How_Digital_Leaders_Outperform_their_Peers_in_Every_Industry.pdf) to be successful in the market, but also large commercial vendors are becoming open source and switching to this business model to stay competitive and open-source startups have caught [the interest of investors](https://techcrunch.com/2021/06/26/2170552/). 
Open source is taking over the software market. In fact, the Open Source Services Market is [predicted to grow](https://www.businesswire.com/news/home/20201113005374/en/66.84-Billion-Open-Source-Services-Market-by-Industry-Service-Type-and-Geography---Global-Forecast-to-2026---ResearchAndMarkets.com) at a CAGR of ~21.75% with a value expected to reach $66.84 billion by 2026. Another proof point of this growth is that [recent surveys show](https://www.datadoghq.com/container-report/#10) that the most popular container images are all based on open source software, which indicates this growing adoption trend of open source.\n\nAdopting open source into your business model is a complex decision and process. If you’re a successful company with a proprietary software product, it’s just a matter of time before a competitor with an open source offering will appear in your market segment. In this case, you will most likely need to switch your business model to one suited for open source software. For example, you will need to switch from license+subscription revenues to just subscription. Another big decision to make is whether or not to open source your software. Many software products that started as proprietary software converted to open source licensing, e.g. Adobe Flex, Visual Studio Code, .NET framework, PowerShell, Solaris. Open sourcing your software product usually goes hand-in-hand with adopting an open source business model of subscription-based revenues.\n\nYou also will need to contribute back to the open source community by making your enhancements and fixes to your product available in your open source project. In fact, to be successful in the open source market, you have to commit resources to help develop open source projects.”\n\n## 7. 
The open source community will grow significantly as a result of the acceleration of digital-first and cloud-native companies.\n\nSaavedra says: \n\n“The cloud helped accelerate the adoption of open source software because it allowed companies to scale up without incurring large costs in software licensing (open source subscription models are less expensive than proprietary software). Furthermore, open source software fosters collaboration among the brightest minds no matter where around the globe they reside, bringing together the power of the community and benefiting developers, organizations, and vendors alike. As a result, developers and organizations continue to adopt and contribute to open source projects due to a low entry barrier, accessibility, and cost. The Covid-19 pandemic [accelerated this adoption even more](https://venturebeat.com/2021/01/26/how-the-pandemic-is-accelerating-enterprise-open-source-adoption/) due to the switch to remote work by organizations that now have access to a new set of developer talent well versed in open source. The acceleration of digital-first and cloud-native companies will increase the use of open source, which will, in turn, demand more and more open source developers. The result will be an increase in the size of the open source community worldwide.”\n\n## 8. All-remote will become a prevailing work environment as a means to attract and retain talent.\n\nDarren Murph, Head of Remote, says:\n\n“All-remote and all-colocated will become the prevailing environments. Hybrid-remote will be broadly tested but will be rife with friction and dysfunction due to a lack of understanding in its implementation. The terminology also will evolve. For some organizations, hybrid will end up meaning ‘remote-first with an office for special events,’ while those who attempt to force knowledge workers into a more rigid in-office schedule will struggle to retain employees. 
\n\nDedicated leadership surrounding remote transitions and overall future-of-work strategy will increase in 2022. What GitLab pioneered has served as [a blueprint for organizations](/company/culture/all-remote/head-of-remote/) like Facebook, Dropbox, Okta, LinkedIn, VMWare, and other tech firms. Next year, industries beyond tech will begin to embrace remote work and create awareness for the intrinsic link between organizational design and talent brand. Organizations that rigidly force knowledge workers back into the office will see above-average attrition rates. With two years of remote work habits being ingrained, top talent will demand continued flexibility. Many organizations that have resisted investing in creating excellent remote work infrastructure will be forced to do so to compete with more flexible rivals. \n\nA well-built remote work plan will be seen as a hedge against future crises. Just as organizations are currently expected to have succession and security plans, having a remote work strategy will be critical to business continuity. Organizations will also need to work hard to establish psychological safety. 
As people resume social gatherings, employers have an opportunity to lean into the culture that is built outside of work and create strategies for that to be shared within the workplace.”",[9,875,682],{"slug":2399,"featured":6,"template":686},"devops-predictions-gitlab-experts-weigh-in-on-ai-security-remote-work-and-more","content:en-us:blog:devops-predictions-gitlab-experts-weigh-in-on-ai-security-remote-work-and-more.yml","Devops Predictions Gitlab Experts Weigh In On Ai Security Remote Work And More","en-us/blog/devops-predictions-gitlab-experts-weigh-in-on-ai-security-remote-work-and-more.yml","en-us/blog/devops-predictions-gitlab-experts-weigh-in-on-ai-security-remote-work-and-more",{"_path":2405,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2406,"content":2412,"config":2417,"_id":2419,"_type":14,"title":2420,"_source":16,"_file":2421,"_stem":2422,"_extension":19},"/en-us/blog/devops-stakeholder-buyin",{"title":2407,"description":2408,"ogTitle":2407,"ogDescription":2408,"noIndex":6,"ogImage":2409,"ogUrl":2410,"ogSiteName":670,"ogType":671,"canonicalUrls":2410,"schema":2411},"Need DevOps buy-in? Here's how to convince stakeholders","If you need to make the case for DevOps to a non-technical crowd, it's important to be prepared. Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681597/Blog/Hero%20Images/speedphoto.jpg","https://about.gitlab.com/blog/devops-stakeholder-buyin","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Need DevOps buy-in? 
Here's how to convince stakeholders\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-09-24\",\n      }",{"title":2407,"description":2408,"authors":2413,"heroImage":2409,"date":2414,"body":2415,"category":679,"tags":2416},[2002],"2020-09-24","\n\nWe know that DevOps is key to staying nimble in an increasingly competitive marketplace, but chances are your colleagues in finance or marketing aren’t as well-informed about software development.\n\nOne of the major challenges technology teams embedded in non-tech organizations face is convincing key business stakeholders to invest in cutting-edge methodologies such as [DevOps](/topics/devops/). Oftentimes, this challenge comes down to ineffective communication and misaligned incentives.\n\n\"Unfortunately, the divide between these incentives and the misalignment in these incentives is not exclusively held between developers and operators, the similar divide exists between the business and IT, in fact, in the business, they may not even be able to tell the difference between developers and operators, it's all IT to them,\" said [Nathen Harvey](https://twitter.com/nathenharvey), Developer Advocate from Google, at GitLab Virtual Commit. \"Much like from my perspective, it's just the business: finance, marketing, accounting, they all go together and blur in my head.\"\n\nThe best way to get stakeholders to buy-in to DevOps? Align incentives, think big picture, lead with empathy, and come prepared with evidence about the business value of DevOps.\n\n## Align incentives on your technology team\n\nBefore approaching the key decision-makers about investing in DevOps, make sure there is consensus among dev and ops about what direction you’re moving in. 
The tension between dev and ops teams is well documented: Developers tend to want greater agility, while operators want more stability.\n\n\"We turn to our developers and we say, 'Your job is to build and ship features as fast as possible, your job is agility,'\" said Nathen. \"And then we turn to our operators and we say, 'Your job (is to) make sure that the platform is stable, that nothing ever breaks.'\"\n\nThe good news is, DevOps is a way to have the best of both worlds.\n\nBefore he joined Google, Nathen worked for a retail company where his responsibility was to push the \"deploy\" button to ship new software updates every two weeks. There was a lot of ceremony around deployments, but there was also an office pool about how many of those changes would be rolled back.\n\nResearch by Google Cloud’s DevOps Research and Assessment (DORA) shows that teams that ship smaller features move faster while maintaining a more stable production environment, with numbers to prove it. When comparing the elite performers with the low performers, elite DevOps performers manage to balance speed and stability:\n\n*   Deploy code 208 times more frequently\n*   106 times faster from commit to deploy\n*   Changes are likely to fail just 1/7 of the time\n*   2604 times faster recovery time from incidents\n\nOnce you have developers and operators clamoring for DevOps, it’s time to move on to the next stakeholder tier.\n\n## Think about the business you work for\n\nGitLab is a software company, so we’re always thinking about new ways to deploy faster and more nimble code. If our developers found a new way to achieve this, we’re all ears. Most of our customers don't work for tech companies, but the most successful ones have found a way to make technology relevant to their business’ mission.\n\nFor example, [Delta Airlines found a way to go cloud native](/blog/delta-cloud-native/) because it fit into their mission of business agility. 
Whether you’re in transportation or e-commerce, business agility is something we can all agree on. Make a list of the top three priorities for your company and think about what your customers want (e.g., in the pandemic it may be an app with reliable curbside pick-up). Think about your company’s mission and business strategy and sketch out a compelling case for why DevOps will help your business edge out the competition.\n\n## Lead with empathy and think strategically\n\nBefore approaching your collaborators on the business side of things, put yourself in their shoes. Think in-depth about their motivations and goals to find the most compelling way to communicate with them.\n\nFirst, write your problem statement (e.g., \"I want to adopt a more agile DevOps strategy\"). Next, identify three key stakeholders across different teams on the business side of things (e.g., Max in Marketing, Alex in Accounting, and Lee in Legal). After that, conduct an informal thought exercise to enable more empathetic and strategic thinking:\n\n*   Look at their job description. What are their core responsibilities?\n*   Think about resourcing. What are their resource constraints?\n*   What is their level of influence over the decision? Grade their influence on a scale of one to five (one being low influence, five being high influence)\n*   How does helping your tech team be more agile impact their team’s performance and goals?\n\nIn the end, communicating with stakeholders about DevOps is all about finding common ground.\n\n## Close with evidence\n\nLet’s face it, the business side of your organization might not know the difference between a developer and an ops pro any more than you understand the intricacies of accounting, and that’s OK. So long as things aren’t broken, the gatekeepers are probably disinclined to fix it. 
But what if you can demonstrate just how much better things could be with a more [agile software delivery strategy](/solutions/agile-delivery/)?\n\nThe DORA team at Google created a [rigorous State of DevOps research program](https://www.devops-research.com/research.html) that assesses how different industries can improve software delivery. A simple five-question survey on five DevOps capabilities will rank your team into four tiers of performance – between low performer and elite performer.\n\nEvaluating your progress is key. Nathen's deployments at a previous employer had good \"time to restore\" rates but the change failure rate was between 16-30%, a metric that leaves a lot of room for improvement.\n\n\"We felt like we were doing really well, and in fact, we were, we had made a ton of great progress, but there were still lots of opportunities for us to improve,\" said Nathen. \"So using this quick check can help you and your team identify where are some opportunities for you to improve? How do you stand up against the others within your industry?\"\n\nIn the end, Nathen’s team ranked as a medium performer. So how does your team line up? By coming prepared to the meeting with evidence on concrete ways a DevOps methodology can lead to more business agility, you are more likely to get the endorsement of key stakeholders on your plan.\n\n## Learn more about measuring software delivery\n\nLearn more about measuring DevOps by watching the keynote featuring Nathen and Dina Graves Portman from GitLab Virtual Commit. 
[Watch the other keynotes](https://www.youtube.com/playlist?list=PLFGfElNsQthYQaTiUPQcu4O0O20WHZksz), including a [presentation](https://youtu.be/xn_WP4K9dl8) by [GitLab CEO Sid Sijbrandij](/company/team/#sytses).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/yUyZExE-5TU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [CHUTTERSNAP](https://unsplash.com/@chuttersnap?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/speed?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[9,1829,749],{"slug":2418,"featured":6,"template":686},"devops-stakeholder-buyin","content:en-us:blog:devops-stakeholder-buyin.yml","Devops Stakeholder Buyin","en-us/blog/devops-stakeholder-buyin.yml","en-us/blog/devops-stakeholder-buyin",{"_path":2424,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2425,"content":2430,"config":2436,"_id":2438,"_type":14,"title":2439,"_source":16,"_file":2440,"_stem":2441,"_extension":19},"/en-us/blog/devops-strategy",{"title":2426,"description":2427,"ogTitle":2426,"ogDescription":2427,"noIndex":6,"ogImage":1393,"ogUrl":2428,"ogSiteName":670,"ogType":671,"canonicalUrls":2428,"schema":2429},"Beyond CI/CD: GitLab's DevOps vision","How we're building GitLab into the complete DevOps toolchain.","https://about.gitlab.com/blog/devops-strategy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Beyond CI/CD: GitLab's DevOps vision\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Pundsack\"}],\n        \"datePublished\": \"2017-10-04\",\n      }",{"title":2426,"description":2427,"authors":2431,"heroImage":1393,"date":2433,"body":2434,"category":299,"tags":2435},[2432],"Mark Pundsack","2017-10-04","\n\nWith GitLab 10.0, we shipped [Auto 
DevOps](https://docs.gitlab.com/ee/topics/autodevops/) for the Community and Enterprise\nEditions. Read on for an in-depth look at our strategy behind it, and beyond.\n\n\u003C!-- more -->\n\nI recently met with my colleagues\n[Joe](/company/team/#JAScheuermann) and\n[Courtland](/company/team/#mktinghipster) to give them the\nlowdown on GitLab's DevOps vision: where we've come from and where we're headed.\nYou can watch the video of our discussion or check out the lightly edited\ntranscript below. You can also jump into the rabbit hole, starting with the meta\nissue for [GitLab DevOps](https://gitlab.com/gitlab-org/gitlab-ce/issues/32639).\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/zMAB42g4MPI\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n\n\n## CI/CD: Where we've come from\n\n![CI/CD/Beyond CD](https://about.gitlab.com/images/blogimages/devops-strategy-ci-scope.svg)\n\nWhen I joined GitLab about a year ago, I created a [vision document for\nCI/CD](/direction/#ci--cd), and outlined a lot of the\nkey things that I thought were missing in [CI/CD in general](/topics/ci-cd/), and going beyond CD.\nI literally called one section \"beyond CD\" because I didn’t have a name for it\nthen.\n\nAnd in that document, I create an example pipeline to characterize all this\nstuff, to show how the pieces fit together into a development lifecycle.\n\n![Example pipeline](https://about.gitlab.com/images/blogimages/devops-strategy-example-pipeline.png){: .shadow}\n\nI love this diagram not only because it's complex and scary, but because when we\nstarted, we had maybe four boxes filled in, and now we have 10 or 12 filled in. To\nstart with, we had code management and, obviously, builds and tests. And we kind\nof did deployment, but not really.\n\nSince then, we’ve added review apps – a specific example of deployments – which\nis really awesome. 
We also added a more formalized mechanism for doing\ndeployments; actually recording deployments and deployment histories, keeping\ntrack of environments, and everything else. Then we added Canary Deployments in\n9.2 and code quality in 9.3. We added system monitoring with Prometheus in 9.0.\n\nWe don’t yet have what I called \"business monitoring,\" which could mean\nmonitoring revenue, or clicks, or whatever you care about; but that’s coming. We\ndon't yet have load testing, but the Prometheus team is thinking about that.\nWe don't yet have a plan for feature flags, but I think it's a really important\npart.\n\nAnd then we have this other dimension of pipelines, which is the relationship\nbetween different codebases (or projects), and in 9.3 we introduced the first\nversion of multi-project pipelines.\n\nSo we've gone from a core view of three or four boxes to where 90 percent is\ncomplete. That's pretty awesome.\n\nIt became obvious to me that we were viewing the scope with this hard line:\ndeveloper focused rather than an ops focused. For example, we’ll deploy into production,\nand we might even watch the metrics related to your code in production, but\nwe’re not going to monitor your entire production app, because that’s\noperations, and that’s clearly out of scope, right?\n\n## Where we're headed: Beyond CD\n\nWhat hit me a few months ago is, \"Why is that out of scope? That’s ridiculous.\nNo, we’re going to keep going. We're going to go past production into\noperations.\" Most of this still applies, but instead of just monitoring the\nsystem as it relates to a merge request, what about monitoring the system for\nnetwork errors, outages, or dependency problems? 
What if we don't stop at\nproduction, and monitor things that are typically ops related that may not\ninvolve a developer at all?\n\nThen I realized that this thing I called Beyond CD, maybe it's really [DevOps](/topics/devops/).\nMaybe the whole thing is DevOps.\n\n### The DevOps tool chain\n\nTo offer some context: DevOps is hard to define, because everybody defines it\nslightly differently. Sometimes DevOps is defined as the intersection of\ndevelopment, operations, and quality assurance.\n\n![DevOps Venn diagram](https://about.gitlab.com/images/blogimages/devops-strategy-venn-diagram.png){: .shadow}\n\n*\u003Csmall>Image by Rajiv.Pant, derived from Devops.png:, [CC BY 3.0](https://commons.wikimedia.org/w/index.php?curid=20202905)\u003C/small>*\n\nFor the most part, my personal interest in DevOps has been in that intersection.\nWe do great code management; we’ve done that for quite a while. How do we get\nthat code into production? How do we get it into QA?\n\nReview apps are a great example that fits squarely in that tiny, little triangle\nin the middle of the Venn diagram. You take your code, you deploy it, which is\nan operations thing, but you have it deployed in a temporary, ephemeral, app,\njust for QA people (or designers, product managers, or anyone who is not a\nprimary coder), so they can test your application for quality assurance, feature\nassurance, or whatever.\n\nBut now, I'm looking beyond the intersection. Here's the [DevOps tool chain\ndefinition](https://en.wikipedia.org/wiki/DevOps_toolchain) from Wikipedia:\n\n![DevOps Toolchain](https://about.gitlab.com/images/blogimages/devops-strategy-devops-toolchain.png){: .shadow}\n\n*\u003Csmall>Image by Kharnagy (Own work) [CC BY-SA 4.0](http://creativecommons.org/licenses/by-sa/4.0), via Wikimedia Commons\u003C/small>*\n\nWell, that’s everything! That’s not the intersection; that’s the union of\neverything from code, to releasing, to monitoring. And that's where things get\nconfusing. 
Sometimes when people talk about DevOps, they’re not talking about\nall of your code stuff. It’s the intersection parts that are the interesting\nparts of DevOps. It’s the parts where we let developers get their code into\nproduction easily. That slice, that intersection, of the Venn diagram, that’s\nthe interesting part about DevOps.\n\nHaving said that, as a product company, we are going to deliver things that are\npretty squarely on the development side, and, eventually, we’re going to deliver\nthings that are pretty squarely in the operations side. At some point, we may\nhave an operations dashboard that lets you understand your dependencies in your\nnetwork infrastructure, and your routers, and your whatever. That’s pretty far\nfetched at this point, but it could happen. Why not? Just have GitLab be\nyour one operations dashboard, and then it’s not just about the intersection of\nthe DevOps, it’s the whole DevOps tool chain.\n\nSo, that is the whirlwind, high-level summary of where we've been, and a little\nbit about where we’re going. Now let's get into specific issues.\n\n### The Ops Dashboard – [#1788](https://gitlab.com/gitlab-org/gitlab-ee/issues/1788)\n\nWe have a monitoring dashboard that's very developer centric. What about\ntaking that same content and slicing it from the operator's perspective? For a\nmoment, ignore all the stuff below, let’s just pretend there’s only the four\nboxes at the top:\n\n![Ops view of monitoring and deploy board](https://about.gitlab.com/images/blogimages/devops-strategy-monitoring-deploy-board.png){: .shadow}\n\nSo an operator might want to know, \"What’s the state of production?\" If I'm a\ndeveloper I can go into a project, into environments, see the production\nenvironment for that project, and I can see what the status is. But what if I\nwant to see all production environments? 
As an operations person, I care a\nlittle less about individual projects than I care about \"production.\" So this is\ngiving me the overview of \"production.\" All of these little boxes would\nrepresent production deploys of projects that you have in your GitLab\ninfrastructure.\n\nThe view is explicitly convoluted because we had just introduced sub-groups and\nI wanted to make sure this mechanism expanded. So ignore all the stuff below and\njust look at the top-level dashboards. Or maybe one level down, which is already\nstill pretty complicated, but let’s say your marketing organization had\ndifferent properties than your other developer operations; you’d be able to see\nreally quickly what the status is. If something’s red, you’d be able to click\ndown, and see details.\n\n![Ops view - service health](https://about.gitlab.com/images/blogimages/devops-strategy-service-health.png){: .shadow}\n\n![Ops view - pod health](https://about.gitlab.com/images/blogimages/devops-strategy-pod-health.png){: .shadow}\n\nYou’d be able to see graphs like this, which are similar to what we already\nprovide, but from the other angle. As a developer I’m looking at the deploy, and\nsaying, \"Oh, how did my deploy affect my performance?\" But this is saying,\n\"How’s production? Is anything wrong with my entire production suite?\"\n\nThis is really just scratching the surface of the ops views of things, but I\nthink it's going to become much more important as people embrace DevOps. You\nwant your developers to be talking the same language as your operations people.\nIn a lot of organizations, it’s already the same people – there are no separate\noperations people. Developers push code to production, and they're paged if\nsomething goes wrong. In others, developers and operators are separate, but they\nwant to work together towards DevOps.\n\nEither way, you want to be using the same tools. 
You want to be able to point\nto, for example, a memory bump that your operations people should also be able\nto see. But if they’re using completely different tools, like New Relic and\nDatadog, that kind of sucks. So let’s give them the same tools.\n\n### Pipeline view of environments – [#28698](https://gitlab.com/gitlab-org/gitlab-ce/issues/28698)\n\nI particularly love this proposal, and I really want to see this happen soon.\n\nThe environments page today is just a list of environments showing the last\ndeployment. The picture tells you who deployed, which is good, and you can see\nthat the commit is from the same SHA as staging, which is kind of nice. I can\nsee the deploy board, and if there's a deploy ongoing, I’m able to see the state\nas it rolls out. We don’t yet show you the current health of these pods; once\nthey're deployed, all we know is that they're deployed. This is how the\nenvironment view is today, and it's centered around deployments.\n\n![Environments list](https://about.gitlab.com/images/blogimages/devops-strategy-environments-list.png){: .shadow}\n*\u003Csmall>Current Environment view\u003C/small>*\n\nYou can click through to see the deployment history and this is actually really\nvaluable because I can see who deployed things, how long ago, and if something\nwent wrong in production I can really quickly roll back and let the developers\nhave some space to go and figure out what went wrong.\n\n![Deployment history](https://about.gitlab.com/images/blogimages/devops-strategy-deployment-history.png){: .shadow}\n*\u003Csmall>Current Deployment History view\u003C/small>*\n\nBut this proposal turns it around to have more of a DevOps view of the thing.\n\n![Pipeline view of environments](https://about.gitlab.com/images/blogimages/devops-strategy-pipeline-view-environments.png){: .shadow}\n*\u003Csmall>Proposed pipeline view of Environments\u003C/small>*\n\nThe idea is to take the same application, and instead of just looking at a list\nof 
environments, I’d be looking at columns with lots of review apps, and some\nnumber of staging environments, and a production environment. Instead of just\nshowing you the SHA, we would show you, for example, what merge requests have\nbeen merged into staging that are not yet in production. That’s a great\nmarriage of these two views, that you’d be able to see the diff between them.\n\nThis list, although it’s just a mockup, shows maybe the last five things that\nwere in production, or what was included in the last deploy, or whatever works\nbest for your environment. Showing what’s in the last deploy might be enough,\nbut for people who deploy 17 times a day, maybe that’s a little less useful, and\nwe just show history.\n\nBut then what about building in more of the operations kind of stuff, and\nsaying, \"Alright, what’s the state of my pods?\" Here we were flagging where the\nerror rate exceeded a threshold and there’s some alert that popped up. And here\nwe’re showing this automatic rollback kind of stuff, but basically just really\nbuilding on this ops view. Of course this is still a DevOps view, in the sense\nthat I’m looking at an individual project. So, one permutation of that would\nmarry that ops view of all of production. Or if I’m looking at a [microservices](/topics/microservices/)\nkind of thing, where there are five or 100 different projects, and I want to see\nthe status of all those really quickly. 
See\n[#28707](https://gitlab.com/gitlab-org/gitlab-ce/issues/28707).\n\n### Dependency security – [#28566](https://gitlab.com/gitlab-org/gitlab-ce/issues/28566)\n\nSo, here, the idea is that you've deployed something in production, and some\nmodule or something that you depend on has been updated, not by you, but by the\ncommunity, or someone else.\n\nThe easiest and most naive way to approach this is that with the next merge\nrequest, or next CI/CD run, we would go and check to see if anything’s outdated.\nAnd we might fail your CI/CD because of this.\n\nIt would make much more sense to run this stuff automatically. Even if, for\nexample, nobody pushes for seven days, and in the middle of that, there’s a\nsecurity release; just proactively run stuff and notify me. So, that's sort of a\nsecond iteration of thinking about how you would notify somebody, and tell them,\n\"Oh, you’ve got a security change. You should go in and do something about it.\"\n\nNow, the third iteration is, \"Well, what would you do with that information?\"\nYou’d go and maybe give it to your junior developer to go and make the change,\nand point to the new version. And then, of course, you need to test that it\nworks. So, you’re going to create a merge request, and then test it, to make\nsure that it still functions properly.\n\nWell, why notify somebody, and tell the junior developer to go and do this? Why\ndon’t we just do it for you? Why don’t we just go and submit the merge request\nfor you, and then tell you what the results are. And, in fact, let’s go further,\nand say, \"Hey it passed. We just deployed into production for you.\" Why would\nyou have security vulnerability in place any longer than necessary?\n\nAnd instead of having 100 alerts about 100 projects or microservices that all\nneed to get updated, you just get alerts about three of them that fail, that\nactually have some weird dependency that it didn’t work on. 
And then, you can\nfocus on real problems.\n\n![Dependency security](https://about.gitlab.com/images/blogimages/devops-strategy-dependency-security.png){: .shadow}\n\nSo, that’s a glimpse at how we’re thinking about this.\n\nThis would definitely be an enterprise-level feature. And again, we've fleshed\nout some ideas and it’s unscheduled, but it does really tie into the ops\nmindset.\n\n### Question: Enterprise Edition features\n\nCourtland: You mentioned that sort of automation would be an enterprise edition\nfeature. Can you talk a little bit more about why a smaller development team,\nlike under 100 developers, wouldn’t get value out of something like that?\n\nMark:\tSo, this is where things get a little tricky, because of course,\nsmaller developer teams would get value out of that too. Everybody would get\nvalue out of that. Some of it has to do with proportionality. One test I like to\nuse is: is there some other way you could achieve the same thing, using\nworkarounds, and we’re just making it easier? And that’s a good case, here. You\ncan already do this, but we’re going to automate it. And automation is something\nthat affects larger companies a lot more, because they’ve got hundreds of\nprojects, with thousands of developers. And they just can’t deal with the scale,\nor it’s worth dealing with the automation. Whereas, if you’ve got a small\ndeveloper, with a single project, you’re pretty much on top of it. And if\nsomething changes, yeah, you just go ahead and fix it; you’re aware of it. The\nbigger challenges are when you’re just not aware of how this thing might affect\none project that somebody’s almost forgotten about.\n\nThe other thing is that, just to be blunt, our concept that Enterprise Edition\nis only for more than X people, is a little flawed. It’s that it\napplies more to those companies, that those people value it more, and they’d be\nwilling to pay for it more, or however you judge your value there. 
Clearly,\nsmall companies would value all this automation, and everything else, but\nthey’re not going to get as much incremental value out of it, as a larger\ncompany would.\n\n~~The other way to look at it is that this is pretty advanced stuff, and frankly,\nit doesn’t deserve to be, free, open source. It’s probably really complicated\nstuff, and you’re going to have to pay there.~~ *[Editor's note: Advancedness is not a criterion in open sourcing or not open sourcing. There are advanced features that are open source, such as [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/). There are basic features that are proprietary, such as [File Locking](/solutions/file-locking/). The criteria we use to decide which version the features go in are documented on our [stewardship page](/company/stewardship/#what-features-are-paid-only).]* Maybe there’d be levels to it,\nright? There’d be a version that gives you an alert: we’ll run this test once a\nday. Or even just have a blog post about how to do this: you set up a recurring,\nscheduled pipeline job, once a day, to test if any of your dependencies have\nbeen updated. And you can do that today and then it would alert you. But to\nautomate it, to actually, create a merge request for you, and everything else?\nWell, that’s in the Enterprise feature. It’s not that version checking isn’t\nimportant for everybody, but the automation around it really, really matters for\nlarger companies. 
Does that make sense?\n\nCourtland:\tYeah, I mean, I think that the first way you described it, in that,\n\"Yeah, everyone gets some value out of a feature like this, but the overwhelming\nvalue and use for this is in larger development teams,\" that resonated.\n\n### SLO and auto revert – [#1661](https://gitlab.com/gitlab-org/gitlab-ee/issues/1661)\n\nThis is a feature showing how we’re thinking about auto reverting something.\nWe’ve got canary deployments, and we have another feature we’re not currently\nworking on or scheduled, but it’s incremental rollout, so that you would not\njust rollout to a single canary, or a bucket of canaries, but it would slowly\nincrement: 1 percent, then 5 percent, then 25 percent. But let’s say, at some point, during my\nrollout, you detect an error.\n\n![Revert](https://about.gitlab.com/images/blogimages/devops-strategy-revert.png){: .shadow}\n\nThis is a mockup of what it would look like. You’re like, \"Oh, error rates\nincreased by something above our threshold; let’s revert that one, go back, and\ncreate a new issue, and alert somebody to take a look at it.\" Lately, I’m\nthinking that I don’t know if I really want to automatically roll back, versus\njust stop it in its canary form, and say, \"Well, it’s canary. Let’s let canary\nbe there, so you can debug the canary, but just don’t let the canary go on\nfurther.\"\n\nError rate exceeding is a pretty tough one. But let’s say memory bumps up, and\nyou might be like, \"Yeah, we added something, and it’s using more memory, and\nwe’re okay with that. 
Don’t stop my deploy just because it’s using more memory.\"\nThere might need to be human intervention in there, but somewhere along this\nline we’re automating a lot of the deploy stuff.\n\n### Onboarding and adoption – [#32638](https://gitlab.com/gitlab-org/gitlab-ce/issues/32638)\n\nOnboarding and adoption is a really big issue, with lots of different ideas for\nhow to improve onboarding, how to get people actually using idea to production,\nimproving auto deploy. Not a lot of visuals, so I won’t really talk about it,\nbut it’s definitely one of our top priorities; the next most important thing\nwe’re working on.\n\n### Cloud development – [#32637](https://gitlab.com/gitlab-org/gitlab-ce/issues/32637)\n\nCloud development is the idea that setting up your local host machine is\nactually kind of a pain sometimes. Especially with microservices, where each\nservice can be in their own language, you don’t want to maintain Java, and Ruby,\nand Node, and all these other versions of dependencies, and every time something\nswitches, you’ve got to reinstall a new version of stuff. Or even these days,\nyou might develop on an iPad, and you don’t have a local host to compile things.\n\nCloud9 is the biggest, well known thing, from an IDE perspective, and Amazon\nbought them a little while ago. But even aside from the IDE portion of it, it’s\njust being able to develop in the cloud, and being able to make some changes,\nand then push them back; commit them to a repo.\n\nWe have a little bit of a demo like this, right now, with our web terminal. So,\nif you have Kubernetes, you see this terminal button, and it just pops up the\nterminal right in the staging server. And I can actually go ahead and edit a\nfile there, and... I just made a live change into my staging app.\n\nNow, generally speaking, I would not actually recommend you do that, because\nI’m messing with my staging app, that’s not what it's for. It makes an awesome\nlittle demo, but it’s not what you should do. 
What we want to do is come up with\na way that people could do that, but have it be not on your staging app, but in\nmaybe a dev environment that is specifically for this purpose. But that also,\nafter you make your changes, and test them, and run them live, you can then go\nand commit them back to [version control](/topics/version-control/), and close that loop. So there’s a whole\nbunch of issues related to that. And to be honest, it was what we were hoping\nthat Koding would have provided for us, and we have an integration\nwith them, but it hasn’t worked out, really, the way that we had hoped. And so,\nwe’re looking at alternatives, and we think we can probably do this ourselves.\n\nAnyway, that’s a big thing to flesh out.\n\n### GitLab PaaS – [#32820](https://gitlab.com/gitlab-org/gitlab-ce/issues/32820)\n\nHeroku is awesome, because it gives you this really great platform that’s easy\nto use, and gives you all this functionality on top of Amazon. Five or six years\nago it was super, brain-meltingly awesome to get people to do ops. For a\ndeveloper, I don’t have to be aware of how to do ops; Heroku just does ops\nfor us.\n\nGitLab PaaS is basically the idea that you’ve got a lot of these components, and\nwe’re not going to invent them all from scratch. We’re going to rely on\nKubernetes, for example. But on top of Kubernetes, we could make an awesome\nenvironment for ops. An ops environment, or a platform as a service. And so,\nthere’s an issue to discuss what it would take to do that. At some point in\ntime, this is a big item for us. If we can make it super really easy for you to\nfully manage your ops environment via GitLab, and maybe, for example, never\ntouch the Kubernetes dashboard; never touch any of the tools, just use the\nGitLab tools to do this. That’s pretty powerful.\n\nSort of related is an idea in the onboarding stuff, that on GitLab.com\nwe can actually provide you with a Kubernetes cluster; maybe a shared cluster. 
We\nhave to worry about security, of course. But imagine if you were a brand new\nuser on GitLab.com, and you push up an app, and you have nothing in there\nspecifically for GitLab, you just push up your code, and GitLab is like, \"Oh,\nthat’s a Ruby app. Okay, I know how to build Ruby apps. Oh, and I also know how\nto test Ruby apps. I’m just going to go and test them automatically for you.\"\nAnd, \"Oh, by the way, I know how to deploy this. I’m just going to go ahead and\ndeploy this to production.\" And we’ll make a\nproduction.project-name.ephemeral-gitlabapps.com, whatever the hell, some domain\nso that it’s not going to affect your actual production. But if you wanted to,\nyou would just point your DNS over to this production app, and you've got the\nproduction app running on GitLab infrastructure. And that’s, really, what Heroku\nprovided, right?\n\nBut that also is an onboarding thing for us to make it really easy. Because if\nwe want everybody to have CI, well, let’s turn it on for you. That’s pretty\nawesome. If we want everybody to have CD, we can’t just turn it on for you,\nbecause you have to have a place to deploy it to. So, if we just provided you a\nKubernetes cluster (\"everybody gets a cluster\"), then you just got a place. And,\nI mean, we’ll severely limit it. We’ll make it limited in some way, so that\nyou’re not going to run the production stuff for long there. Or if you do, you have\nto pay for it. But we’re not going to try and make money off of the production\nresources. We want to make money off of making it really easy. So, really, what\nwe want to do is encourage you to, then, go and spin up your own Kubernetes\ncluster, say, on Google. 
And we’ll make a nice little link that says, \"Go and\nspin up a cluster on GKE.\" We’ll make that really, really easy, but to make it\nsuper easy, for some number of days, we can just provide you that cluster,\nautomatically.\n\n### Feature flags – [#779](https://gitlab.com/gitlab-org/gitlab-ee/issues/779)\n\nFeature flags are really about decoupling delivery from deployment. It’s the\nidea that you make your code, you deploy it, but you haven’t turned it on, so\nit’s not delivered yet. And the idea there is that it means you can merge in the\nmain line, more often, because it’s not affecting anybody. And, also, it really\nhelps because you can do things like: when I do deliver, I can deliver it for\ncertain people; just GitLab employees or just the Beta group, and then I can\ncontrol that rollout. So then, if there's an error rate spike, well, it’s just\na few people and I know who they are, and they’re going to complain to me.\nIt’s no big deal. But I can test things out, get it polished, fix the problems,\nbefore rolling it out. And then, you can also do things like, roll it out to 10 percent\nof the people, 50 percent of the people, whatever. It’s all about reducing risk, and\nimproving quality, and fundamentally about getting things into your mainline\nquicker. So, it’s ops-ish, in that sense, but it’s, really, still pretty fully\non dev.\n\n### Artifact management – [#2752](https://gitlab.com/gitlab-org/gitlab-ee/issues/2752)\n\nArtifact management has become a hot topic lately. We already have a container\nregistry for Docker image artifacts, and we also have file-based artifacts that\nyou can pass between jobs, and pass between pipelines, and even pass between\ncross project pipelines. 
And we have ways to download them, and browse them, but\nif those artifacts happen to be things like Maven or Ruby or node modules, and\nyou want to publish them, and then consume them in other pipelines, we don’t\nhave a formal way to do that.\n\nAnd you could, obviously, publish to the open source, RubyGems, for example. But\nif you want a private Gem, that is only consumed by your team... Maybe that's\nnot as big for Ruby developers, but Java developers do that all the time. A lot\nof Java developers use Artifactory or Sonatype Nexus. In order to complete the\nDevOps tool chain, we need to have some first class support for that, either by\nbundling in one of these other providers, or by adding layers, and APIs, on top\nof our existing artifacts. My personal pet favorite right now is, let’s say we\ncan just tag our existing artifact, and say, \"Oh, this is Maven type of\nartifact,\" and then we expose that via an API and so then you can declare that\nin another project, and it would just consume the APIs, and just know how to do\nthat. But it would also use our built-in authentication so you don’t have to set\nup creds and do all this declaration; you can be like, \"Oh, I’ve got access to\nthis project and this project, so I can get the artifacts, and I can consume it\nall really easily.\"\n\n### Auto DevOps – [#35712](https://gitlab.com/gitlab-org/gitlab-ce/issues/35712)\n\n*Note: We shipped the first iteration of Auto DevOps in [10.0](/releases/2017/09/22/gitlab-10-0-released/#auto-devops)*\n\nSo, let’s talk about Auto DevOps. This spans from the near-term to the very\nlong-term. It’s great that we do a lot of DevOps, and in a very simplistic way,\nit’s like, \"Oh, but shouldn’t we just make this stuff automatic?\" The way I\nphrase it is, we should provide the best practices in an easy and default way.\nYou can set up a GitLab CI YAML, but you have to actively go and do that. But,\nreally, every project should be running some kind of CI. 
So, why don’t we just\ndetect when you’ve pushed up a project; we’ll just build it, and we’ll go and\ntest it, because we know how to do testing. Today, with Auto Deploy, we already\nuse Auto Build, with build packs. We will automatically detect, I think, one of\nseven different languages, and automatically build your Java app, or Ruby, or\nNode... and we use Heroku’s build packs, actually, to do this build. And so we\nbuild that up, and when using Auto Deploy, we’ll go ahead and deploy that. You\nstill have to, obviously, have a Kubernetes cluster in order to do that, so it’s\nnot fully automated if you don’t have that. But if you’ve got Kubernetes, hey,\nthis is a literally one click. You pick from a menu, say, \"Oh, I’m on\nKubernetes,\" and then hit submit, and you’ve got Auto Deploy and Auto Build.\n\nBut one of the things we don’t have is Auto CI. And that’s a little annoying,\nbut it’s one of the things we want to pick up, and actually, hopefully our CTO,\nDmitriy, is going to pick that up in Q3; it's one of his OKRs. Heroku,\nthemselves, actually extended build packs to do testing, and so that means that\nthere’s at least five build packs that know how to test these languages. And so,\nhey, let’s use that. But even if that doesn’t work, there’s a lot of other\nthings we can do. Other companies have all this stuff automated, as well. So if\nwe can’t use Heroku CI, being able to say, \"Oh, this is this language; we know\nhow to test this language,\" we'll be making that automatic.\n\nAutomatic is multiple levels of things. Is it a wizard that configures this\nstuff for me? Is it one click checkbox, that says, \"Yes, turn on auto CI,\" or is\nit templates that I can easily add into my GitLab CI YAML? I think, in order to\nqualify as auto, what we have to do here is that it shouldn’t be templates. It\nshouldn’t be blog posts that tell me how to do it. That’s just CI. 
It should be,\nliterally, just \"I pushed and it worked;\" or at most a checkbox or two.\n\nLet’s go further, what other thing could we just automate here? And not automate\nstrictly for the purposes of automation, but about bringing best practices to\npeople. So, you have to actively work hard, to turn these things off. If you\ndon’t want CI, then shut it off, but by default you should have this.\n\nSo, this is a really, really long list of things that will take us forever to\nget to. The first ones have links, because we’re tracking real issues for this.\nAuto Metrics is a great one. If you’re running certain languages, you should\njust be able to, really easily, go and just pull the right information out of\nthere. But whatever, the list is huge.\n\nBut the idea is that we can build up this Auto DevOps, even the marketing term,\nand start talking about it in that way, and to not just say that GitLab is great\nfor your DevOps and is a complete DevOps tool chain. But, in fact, we do all\nthis stuff for you automatically.\n\nThere’s a lot to be done to make this fully automated. And what percentage of\nprojects can we really do? Auto Deploy is a great example that only works for\nweb apps. If it’s not a web app, we can’t just deploy it. What would it mean? We\ndeploy it, and it just wouldn’t function. If you made a command line app, what\nwould deploy even mean? Or if it’s a Maven, or really any kind of module that\nyou bundled up and released, that’s not the same thing as a deploy. So, maybe we\nneed an Auto Release. It’s not on this list, but maybe it should be. But within\nthe web app space, we can do some of this stuff automatically.\n\nSo that’s it. 
Everything you ever wanted to know about DevOps.\n",[728,9,976],{"slug":2437,"featured":6,"template":686},"devops-strategy","content:en-us:blog:devops-strategy.yml","Devops Strategy","en-us/blog/devops-strategy.yml","en-us/blog/devops-strategy",{"_path":2443,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2444,"content":2450,"config":2456,"_id":2458,"_type":14,"title":2459,"_source":16,"_file":2460,"_stem":2461,"_extension":19},"/en-us/blog/devops-tool-landscape",{"title":2445,"description":2446,"ogTitle":2445,"ogDescription":2446,"noIndex":6,"ogImage":2447,"ogUrl":2448,"ogSiteName":670,"ogType":671,"canonicalUrls":2448,"schema":2449},"The DevOps tool landscape","Competitive intelligence manager Mahesh Kumar describes the criteria we use when comparing GitLab to other DevOps tools.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670008/Blog/Hero%20Images/devops-tool-landscape.jpg","https://about.gitlab.com/blog/devops-tool-landscape","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The DevOps tool landscape\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mahesh Kumar\"},{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-11-01\",\n      }",{"title":2445,"description":2446,"authors":2451,"heroImage":2447,"date":2453,"body":2454,"category":679,"tags":2455},[2452,788],"Mahesh Kumar","2019-11-01","\nOne of the [core values](https://handbook.gitlab.com/handbook/values/) at GitLab is transparency, and it is in this spirit that we evaluate and articulate how GitLab fits into the competitive landscape. One of the ways we’ve demonstrated this transparency is by [listing other DevOps tools](/competition/) on our website and how they compare to functionality in GitLab. 
This approach is a little unorthodox but we believe this transparency not only helps teams make the right decisions, it also helps us identify where we can improve our product.\n\nFor any competitive comparison to be effective, it has to be fair, accurate, and easy to understand. Whether we’re comparing [three versions of Jenkins](/blog/jenkins-one-year-later/) to GitLab CI/CD, or comparing other [DevOps tools](/topics/devops/devops-tools-explained/) in the SDLC, we try to ensure these three key objectives of competitive comparisons are achieved.\n\n## Staying fair\n\nOne of the biggest challenges in competitive comparisons is staying fair and credible. The selection of competitive comparison criteria plays a significant role because it has to be comprehensive and not self-serving. Far too often vendors restrict competitive comparison criteria to what their product does well and avoid the gaps that might be in their products. At GitLab, we make a concerted effort to avoid this pitfall, and our culture of transparency keeps us honest in our assessment of where we excel and where we can do better.\n\nThe [GitLab Maturity Framework](/direction/maturity/) articulates the stages, categories, and features that constitute the end-to-end DevOps lifecycle. The maturity framework shows where GitLab provides an elevated user experience and also outlines our planned roadmap for the future. Since this framework takes a long-term view of criteria/features that constitute various DevOps stages and categories, we use this framework as a guide for our competitive comparisons.\n\nIn our GitLab Maturity Framework, we have a few categories where we rank as one of the best-in-class, both with industry analysts and GitLab users: Source code management, code review, and continuous integration (CI). To see one of these comparisons, check out our Jenkins CI page where we outline features, pricing, and a comprehensive overview.\n\n[Jenkins vs. 
GitLab](/devops-tools/jenkins-vs-gitlab/)\n{: .alert .alert-gitlab-purple .text-center}\n\n## Keeping it accurate\n\nHaving settled on criteria for evaluation, getting the data accurate is a major challenge. We have a structured information gathering process as laid out below:\n\n    1. Website\n    2. Documentation\n    3. Demos\n    4. Product install and usage\n    5. Customer feedback\n\nSometimes we are unable to complete this process for all vendor products for several reasons. First is the lack of available information either on a vendor's website or documentation. Second, we may be unable to access their product to validate certain capabilities. Some vendors do not provide a free or easily accessible version of the product, while others may explicitly prohibit the use of their product for comparison purposes. In either case, we restrict our comparison to publicly available details.\n\nThe second challenge in ensuring accuracy is that vendors don't always put out new releases and capabilities on a constant basis and our analysis may be slightly outdated. One of the best examples of this is, “when does one stop [painting the Golden Gate Bridge](http://goldengatebridge.org/research/facts.php#PaintHowOften)?” The answer is never! It’s an ongoing process that requires continuous paint touch-ups from one end to the other.\n\n## Everyone can contribute\n\nOur open source DNA extends to how we manage the tools landscape pages. We freely solicit input internally from multiple teams within GitLab and more importantly from other vendors’ teams. Anyone, including other vendors, can use GitLab to create an issue stating the change they wish to see or information they would like to correct. This issue is then assigned to the appropriate GitLab team to address. 
In fact, one Product Manager from a vendor recently contacted us about a change to their comparison page, and we gladly made that change.\n\nBy providing an opportunity to comment and give feedback, we hope to foster a dialog with those better informed about different products, thereby improving the tools landscape pages with rich and accurate information.\n\n## Easy to understand\n\nThe final challenge in comparison pages is to make them easy to interpret. We do this in two different ways: First, all the feature-level comparison is listed in the comparison page. For those interested in a particular feature or capability, they can easily scan the page to find the feature they’re looking for.\n\nSometimes the feature details need explanation, or perhaps there’s a feature that doesn’t quite fit into the “yes or no” mold. For that reason, we also provide a top-down analysis at the start of most comparison pages that provides a summary of features and provides additional context. This sometimes means a critical feature can get lost in the text, but we are doing our best to keep consistency across vendors and identify discrepancies quickly.\n\nThere are a lot of DevOps tools out there. As a complete [DevOps platform](/solutions/devops-platform/) delivered as a single application, GitLab can remove the pain of having to choose, integrate, learn, and maintain the multitude of tools necessary for a successful DevOps toolchain. 
If a DevOps tool is missing, feel free to [email us](mailto:incoming+gitlab-com-marketing-product-marketing-7424125-issue-@incoming.gitlab.com?subject=DevOps%20tool%20request&amp;amp;bcc=devopstools%40gitlab.com&amp;amp;body=-%20Tool%20name%3A%0D%0A-%20Stages%3A%0D%0A-%20Change%3A%0D%0A%0D%0A%0D%0APlease%20leave%20these%20label%20flags.%20%20%20%20%0D%0A%2Flabel%20~comparison%20~Servicedesk) or [create an issue](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#create-a-new-issue) and we’ll be happy to add a feature comparison for that product.\n\nCover image by [Troy Nikolic](https://unsplash.com/@troynikolic?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[109,9,728],{"slug":2457,"featured":6,"template":686},"devops-tool-landscape","content:en-us:blog:devops-tool-landscape.yml","Devops Tool Landscape","en-us/blog/devops-tool-landscape.yml","en-us/blog/devops-tool-landscape",{"_path":2463,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2464,"content":2470,"config":2477,"_id":2479,"_type":14,"title":2480,"_source":16,"_file":2481,"_stem":2482,"_extension":19},"/en-us/blog/devops-workflows-json-format-jq-ci-cd-lint",{"title":2465,"description":2466,"ogTitle":2465,"ogDescription":2466,"noIndex":6,"ogImage":2467,"ogUrl":2468,"ogSiteName":670,"ogType":671,"canonicalUrls":2468,"schema":2469},"JSON formatting and CI/CD linting tips for DevOps workflows","Learn how to filter in JSON data structures and interact with the REST API. 
Use the GitLab API to lint your CI/CD configuration and dive into Git hooks speeding up your workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681979/Blog/Hero%20Images/gert-boers-unsplash.jpg","https://about.gitlab.com/blog/devops-workflows-json-format-jq-ci-cd-lint","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tips for productive DevOps workflows: JSON formatting with jq and CI/CD linting automation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2021-04-21\",\n      }",{"title":2471,"description":2466,"authors":2472,"heroImage":2467,"date":2474,"body":2475,"category":791,"tags":2476},"Tips for productive DevOps workflows: JSON formatting with jq and CI/CD linting automation",[2473],"Michael Friedrich","2021-04-21","\n\n## What is JSON linting?\n\nTo understand JSON linting, let’s quickly break down the two concepts of JSON and linting. \n\n***JSON*** is an acronym for JavaScript Object Notation, which is a lightweight, text-based, open standard format designed specifically for representing structured data based on the JavaScript object syntax. It is most commonly used for transmitting data in web applications. It parses data faster than XML and is easy for humans to read and write.\n\n***Linting*** is a process that automatically checks and analyzes static source code for programming and stylistic errors, bugs and suspicious constructs. \n\nJSON has become popular because it is human-readable and doesn’t require a complete markup structure like XML. It is easy to analyze into logical syntactic components, especially in JavaScript. It also has many JSON libraries for most programming languages.\n\n### Benefits of JSON linting\n\nFinding an error in JSON code can be challenging and time-consuming. 
The best way to find and correct errors while simultaneously saving time is to use a linting tool. When Json code is copied and pasted into the linting editor, it validates and reformats Json. It is easy to use and supports a wide range of browsers, so applications development with Json coding don’t require a lot of effort to make them browser-compatible.\n\nJSON linting is an efficient way to reduce errors and it improves the overall quality of the JSON code. This can help accelerate development and reduce costs because errors are discovered earlier.\n\n### Some common JSON linting errors\n\nIn instances where a JSON transaction fails, the error information is conveyed to the user by the API gateway. By default, the API gateway returns a very basic fault to the client when a message filter has failed.\n\nOne common JSON linting error is parsing. A “parse: unexpected character\" error occurs when passing a value that is not a valid JSON string to the JSON. parse method, for example, a native JavaScript object. To solve the error, make sure to only pass valid JSON strings to the JSON.\n\nAnother common error is NULL or inaccurate data errors, not using the right data type per column or extension for JSON files, and not ensuring every row in the JSON table is in the JSON format.\n\n### How to fix JSON linting errors\n\nIf you encounter a NULL or inaccurate data error in parsing, the first step is to make sure you use the right data type per column. For example, in the case of “age,” use 12 instead of twelve.\n\nAlso make sure you are using the right extension for JSON files. When using a compressed JSON file, it must end with “json” followed by the extension of the format, such as “.gz.”\n\nNext, make sure the JSON format is used for every row in the JSON table. Create a table with a delimiter that is not in the input files. 
Then, run a query equivalent to the return name of the file, row points and the file path for the null NSON rows.\n\nSometimes you may find files that are not your source code files, but ones generated by the system when compiling your project. In that instance, when the file has a .js extension, the ESLint needs to exclude that file when searching for errors. One method of doing this is by using ‘IgnorePatterns:’ in .eslintrc.json file either after or before the “rules” tag.\n\n“ignorePatterns”: [“temp.js”, “**/vendor/*.js”],\n\n“rules”: {\n\nAlternatively, you can create a separate file named‘.eslintignore’ and incorporate the files to be excluded as shown below :\n**/*.js\nIf you opt to correct instead of ignore, look for the error code in the last column. Correct all the errors in one fule and rerun ‘npx eslint . >errfile’ and ensure all the errors of that type are cleared. Then look for the next error code and repeat the procedure until all errors are cleared.\n\nOf course, there will be instances when you won’t understand an error, so in that case, open [https://eslint.org/docs/user-guide/getting-started](https://eslint.org/docs/user-guide/getting-started) and type the error code in the ‘Search’ field on the top of the document. There you will find very detailed instructions as to why that error is raised and how to fix it.\n\nFinally, you can forcibly fix errors automatically while generating the error list using:\n\nNpx eslintrc . — fix \n\nThis is not recommended until you become more well-versed with lint errors and how to fix them. Also, you should keep a backup of the files you are linting because while fixing errors, certain code may get overwritten, which could cause your program to fail.\n\n## JSON linting best practices\n\nHere are some tips for helping your consumers use your output:\n\nFirst, always enclose the **Key** **:** **Value** pair within **double quotes**. 
It may be convenient (not sure how) to generate with Single quotes, but JSON parser don’t like to parse JSON objects with single quotes.\n\nFor numerical values, quotes are optional but it is a good idea to enclose them in double quotes.\n\nNext, don’t ever use hyphens in your key fields because it breaks python and scala parser. Instead use underscores (_). \n\nIt’s a good idea to always create a root element, especially when you’re creating a complicated JSON.\n\n\nModern web applications come with a REST API which returns JSON. The format needs to be parsed, and often feeds into scripts and service daemons polling the API for automation.\n\nStarting with a new REST API and its endpoints can often be overwhelming. Documentation may suggest looking into a set of SDKs and libraries for various languages, or instruct you to use `curl` or `wget` on the CLI to send a request. Both CLI tools come with a variety of parameters which help to download and print the response string, for example in JSON format.\n\nThe response string retrieved from `curl` may get long and confusing. It can require parsing the JSON format and filtering for a smaller subset of results. This helps with viewing the results on the CLI, and minimizes the data to process in scripts. The following example retrieves all projects from GitLab and returns a paginated result set with the first 20 projects:\n\n```shell\n$ curl \"https://gitlab.com/api/v4/projects\"\n```\n\n![Raw JSON as API response](https://about.gitlab.com/images/blogimages/devops-workflows-json-format-jq-ci-cd-lint/gitlab_api_response_raw_json.png){: .shadow}\n\nThe [GitLab REST API documentation](https://docs.gitlab.com/ee/api/#how-to-use-the-api) guides you through the first steps with error handling and authentication. In this blog post, we will be using the [Personal Access Token](https://docs.gitlab.com/ee/api/#personalproject-access-tokens) as the authentication method. 
Alternatively, you can use [project access tokens](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html) for [automated authentication](https://docs.gitlab.com/ee/api/#authentication) that avoids the use of personal credentials.\n\n### REST API authentication\n\nSince not all endpoints are accessible with anonymous access they might require authentication. Try fetching user profile data with this request:\n\n```shell\n$ curl \"https://gitlab.com/api/v4/user\"\n{\"message\":\"401 Unauthorized\"}\n```\n\nThe API request against the `/user` endpoint requires to pass the personal access token into the request, for example, as a request header. To avoid exposing credentials on the terminal, you can export the token and its value into the user's environment. You can automate the variable export with ZSH and the [.env plugin](https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/dotenv) in your shell environment. You can also source the `.env` once in the existing shell environment.\n\n```shell\n$ vim ~/.env\n\nexport GITLAB_TOKEN=”...”\n\n$ source ~/.env\n```\n\nScripts and commands being run in your shell environment can reference the `$GITLAB_TOKEN` variable. Try querying the user API endpoint again, with adding the authorization header into the request:\n\n```shell\n$ curl -H \"Authorization: Bearer $GITLAB_TOKEN\" \"https://gitlab.com/api/v4/user\"\n```\n\nA reminder that only administrators can see the attributes of all users, and the individual can only see their user profile – for example, `email` is hidden from the public domain.\n\n### How to request responses in JSON\n\nThe [GitLab API provides many resources](https://docs.gitlab.com/ee/api/api_resources.html) and URL endpoints. 
You can manage almost anything with the API that you’d otherwise configure using the graphic user interface.\n\nAfter sending the [API request](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_message), the [response message](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Response_message) contains the body as string, for example as a [JSON content type](https://docs.gitlab.com/ee/api/#content-type). `curl` can provide more information about the response headers which is helpful for debugging. Multiple verbose levels enable the full debug output with `-vvv`:\n\n```shell\n$ curl -vvv \"https://gitlab.com/api/v4/projects\"\n[...]\n* SSL connection using TLSv1.2 / ECDHE-RSA-CHACHA20-POLY1305\n* ALPN, server accepted to use h2\n* Server certificate:\n*  subject: CN=gitlab.com\n*  start date: Jan 21 00:00:00 2021 GMT\n*  expire date: May 11 23:59:59 2021 GMT\n*  subjectAltName: host \"gitlab.com\" matched cert's \"gitlab.com\"\n*  issuer: C=GB; ST=Greater Manchester; L=Salford; O=Sectigo Limited; CN=Sectigo RSA Domain Validation Secure Server CA\n*  SSL certificate verify ok.\n[...]\n> GET /api/v4/projects HTTP/2\n> Host: gitlab.com\n> User-Agent: curl/7.64.1\n> Accept: */*\n[...]\n\u003C HTTP/2 200\n\u003C date: Mon, 19 Apr 2021 11:25:31 GMT\n\u003C content-type: application/json\n[...]\n[{\"id\":25993690,\"description\":\"project for adding issues\",\"name\":\"project-for-issues-1e1b6d5f938fb240\",\"name_with_namespace\":\"gitlab-qa-sandbox-group / qa-test-2021-04-19-11-13-01-d7d873fd43cd34b6 / project-for-issues-1e1b6d5f938fb240\",\"path\":\"project-for-issues-1e1b6d5f938fb240\",\"path_with_namespace\":\"gitlab-qa-sandbox-group/qa-test-2021-04-19-11-13-01-d7d873fd43cd34b6/project-for-issues-1e1b6d5f938fb240\"\n\n[... 
JSON content ...]\n\n\"avatar_url\":null,\"web_url\":\"https://gitlab.com/groups/gitlab-qa-sandbox-group/qa-test-2021-04-19-11-12-56-7f3128bd0e41b92f\"}}]\n* Closing connection 0\n```\n\nThe `curl` command output provides helpful insights into TLS ciphers and versions, the request lines starting with `>` and response lines starting with `\u003C`. The response body string is encoded as JSON.\n\n### How to see the structure of the returned JSON\n\nTo get a quick look at the structure of the returned JSON file, try these tips:\n\n* Enclose square brackets to identify an array `[ …. ]`.\n* Enclose curly brackets identify a [dictionary](https://en.wikipedia.org/wiki/Associative_array) `{ … }`. Dictionaries are also called associative arrays, maps, etc.\n* `”key”: value` indicates a key-value pair in a dictionary, which is identified by curly brackets enclosing the key-value pairs.\n\nThe values in [JSON](https://en.wikipedia.org/wiki/JSON) consist of specific types - a string value is put in double-quotes. Boolean true/false, numbers, and floating-point numbers are also present as types. If a key exists but its value is not set, REST APIs often return `null`.\n\nVerify the data structure by running \"linters\". Python's JSON module can parse and lint JSON strings. 
The example below misses a closing square bracket to showcase the error:\n\n```shell\n$ echo '[{\"key\": \"broken\"}' | python -m json.tool\nExpecting object: line 1 column 19 (char 18)\n```\n\n[jq](https://stedolan.github.io/jq/) – a lightweight and flexible CLI processor – can be used as a standalone tool to parse and validate JSON data.\n\n```shell\n$ echo '[{\"key\": \"broken\"}' | jq\nparse error: Unfinished JSON term at EOF at line 2, column 0\n```\n\n[`jq` is available](https://stedolan.github.io/jq/download/) in the package managers of most operating systems.\n\n```shell\n$ brew install jq\n$ apt install jq\n$ dnf install jq\n$ zypper in jq\n$ pacman -S jq\n$ apk add jq\n```\n\n### Dive deep into JSON data structures\n\nThe true power of `jq` lies in how it can be used to parse JSON data:\n\n> `jq` is like `sed` for JSON data. It can be used to slice, filter, map, and transform structured data with the same ease that `sed`, `awk`, `grep` etc., let you manipulate text.\n\nThe output below shows how it looks to run the request against the project API again, but this time, the output is piped to `jq`.\n\n```shell\n$ curl \"https://gitlab.com/api/v4/projects\" | jq\n[\n  {\n    \"id\": 25994891,\n    \"description\": \"...\",\n    \"name\": \"...\",\n\n[...]\n\n    \"forks_count\": 0,\n    \"star_count\": 0,\n    \"last_activity_at\": \"2021-04-19T11:50:24.292Z\",\n    \"namespace\": {\n      \"id\": 11528141,\n      \"name\": \"...\",\n\n[...]\n\n    }\n  }\n]\n```\n\nThe first difference is the format of the JSON data structure, so-called [pretty-printed](https://en.wikipedia.org/wiki/Prettyprint). New lines and indents in data structure scopes help your eyes and allow you to identify the inner and outer data structures involved. This format is needed to determine which `jq` filters and methods you want to apply next.\n\n#### About arrays and dictionaries\n\nThe set of results from an API often is returned as a list (or \"array\") of items. 
An item itself can be a single value or a JSON object. The following example mimics the response from the GitLab API and creates an array of dictionaries as a nested result set.\n\n```shell\n$ vim result.json\n[\n  {\n    \"id\": 1,\n    \"name\": \"project1\"\n  },\n  {\n    \"id\": 2,\n    \"name\": \"project2\"\n  },\n  {\n    \"id\": 3,\n    \"name\": \"project-internal-dev\",\n    \"namespace\": {\n      \"name\": \"🦊\"\n    }\n  }\n]\n```\n\nUse `cat` to print the file content on stdout and pipe it into `jq`. The outer data structure is an array – use `-c .[]` to access and print all items.\n\n```shell\n$ cat result.json | jq -c '.[]'\n{\"id\":1,\"name\":\"project1\"}\n{\"id\":2,\"name\":\"project2\"}\n{\"id\":3,\"name\":\"project-internal-dev\",\"namespace\":{\"name\":\"🦊\"}}\n```\n\n### How to filter data structures with `jq`\n\nFilter items by passing `| select (...)` to `jq`. The filter takes a lambda callback function as a comparator condition. When the item matches the condition, it is returned to the caller.\n\nUse the dot indexer `.` to access dictionary keys and their values. Try to filter for all items where the name is `project2`:\n\n```shell\n$ cat result.json | jq -c '.[] | select (.name == \"project2\")'\n{\"id\":2,\"name\":\"project2\"}\n```\n\nPractice this example by selecting the `id` with the value `2` instead of the `name`.\n\n#### Filter with matching a string\n\nDuring tests, you may need to match different patterns instead of knowing the full name. Think of projects that match a specific path or are located in a group where you only know the prefix. Simple string matches can be achieved with the `| contains (...)` function. 
It allows you to check whether the given string is inside the target string – which requires the selected attribute to be of the string type.\n\nFor a filter with the select chain, the comparison condition needs to be changed from the equal operator `==` to checking the attribute `.name` with `| contains (\"dev\")`.\n\n```shell\n$ cat result.json | jq -c '.[] | select (.name | contains (\"dev\") )'\n{\"id\":3,\"name\":\"project-internal-dev\",\"namespace\":{\"name\":\"🦊\"}}\n```\n\nSimple matches can be achieved with the `contains` function.\n\n#### Filter with matching regular expressions\n\nFor advanced string pattern matching, it is recommended to use regular expressions. `jq` provides the [test function for this use case](https://stedolan.github.io/jq/manual/#RegularexpressionsPCRE). Try to filter for all projects which end with a number, represented by `\\d+`. Note that the backslash `\\` needs to be escaped as `\\\\` for shell execution. `^` tests for beginning of the string, `$` is the ending check.\n\n```shell\n$ cat result.json | jq -c '.[] | select (.name | test (\"^project\\\\d+$\") )'\n{\"id\":1,\"name\":\"project1\"}\n{\"id\":2,\"name\":\"project2\"}\n```\n\nTip: You can [test and build the regular expression with regex101](https://regex101.com/) before test-driving it with `jq`.\n\n#### Access nested values\n\nKey value pairs in a dictionary may have a dictionary or array as a value. `jq` filters need to take this factor into account when filtering or transforming the result. The example data structure provides `project-internal-dev` which has the key `namespace` and a value of a dictionary type.\n\n```shell\n  {\n    \"id\": 3,\n    \"name\": \"project-internal-dev\",\n    \"namespace\": {\n      \"name\": \"🦊\"\n    }\n  }\n```\n\n`jq` allows the user to specify the [array and dictionary types](https://stedolan.github.io/jq/manual/#TypesandValues) as `[]` and `{}` to be used in select chains with greater and less than comparisons. 
The `[]` brackets select filters for non-empty dictionaries for the `namespace` attribute, while the `{}` brackets select for all `null` (raw JSON) values.\n\n```shell\n$ cat result.json | jq -c '.[] | select (.namespace >={} )'\n{\"id\":3,\"name\":\"project-internal-dev\",\"namespace\":{\"name\":\"🦊\"}}\n\n$ cat result.json | jq -c '.[] | select (.namespace \u003C={} )'\n{\"id\":1,\"name\":\"project1\"}\n{\"id\":2,\"name\":\"project2\"}\n```\n\nThese methods can be used to access the name attribute of the namespace, but only if the namespace contains values. Tip: You can chain multiple `jq` calls by piping the result into another `jq` call. `.name` is a subkey of the primary `.namespace` key.\n\n```shell\n$ cat result.json | jq -c '.[] | select (.namespace >={} )' | jq -c '.namespace.name'\n\"🦊\"\n```\n\nThe additional select command with non-empty namespaces ensures that only initialized values for `.namespace.name` are returned. This is a safety check, and avoids receiving `null` values in the result you would need to filter again.\n\n```shell\n$ cat result.json| jq -c '.[]' | jq -c '.namespace.name'\nnull\nnull\n\"🦊\"\n```\n\nBy using the additional check with `| select (.namespace >={} )`, you only get the expected results and do not have to filter empty `null` values.\n\n### How to expand the GitLab endpoint response\n\nSave the result from the API projects call and retry the examples above with `jq`.\n\n```shell\n$ curl \"https://gitlab.com/api/v4/projects\" -o result.json 2&>1 >/dev/null\n```\n\n### Validate CI/CD YAML with `jq` for Git hooks\n\nWhile writing this blog post, I learned that you can [escape and encode YAML into JSON with `jq`](https://docs.gitlab.com/ee/api/lint.html#escape-yaml-for-json-encoding). 
This trick comes in handy when automating YAML linting on the CLI, for example as a Git pre-commit hook.\n\nLet’s take a look at the simplest way to test GitLab CI/CD from our [community meetup workshops](https://gitlab.com/gitlab-de/swiss-meetup-2021-jan#resources). A common mistake with the first steps of the process can be missing the two spaces indent or missing whitespace between the dash and following command. The following examples use `.gitlab-ci.error.yml` as a filename to showcase errors and `.gitlab-ci.main.yml` for working examples.\n\n```shell\n$ vim .gitlab-ci.error.yml\n\nimage: alpine:latest\n\ntest:\nscript:\n  -exit 1\n```\n\nCommitting the change and waiting for the CI/CD pipeline to validate at runtime can be time-consuming. The [GitLab API provides a resource endpoint /ci/lint](https://docs.gitlab.com/ee/api/lint.html#validate-the-ci-yaml-configuration). A POST request with JSON-encoded YAML content will return a linting result faster.\n\n#### Parse CI/CD YAML into JSON with jq\n\nYou can use jq to parse the raw YAML string into JSON:\n\n```shell\n$ jq --raw-input --slurp \u003C .gitlab-ci.error.yml\n\"image: alpine:latest\\n\\ntest:\\nscript:\\n  -exit 1\\n\"\n```\n\nThe `/ci/lint` API endpoint requires a JSON dictionary with `content` as key, and the raw YAML string as a value. You can use `jq` to format the input by using the arg parser:\n\n```shell\n§ jq --null-input --arg yaml \"$(\u003C.gitlab-ci.error.yml)\" '.content=$yaml'\n{\n  \"content\": \"image: alpine:latest\\n\\ntest:\\nscript:\\n  -exit 1\"\n}\n```\n\n#### Send POST request to /ci/lint\n\nThe next building block is to [send a POST request to the /ci/lint](https://docs.gitlab.com/ee/api/lint.html#validate-the-ci-yaml-configuration). The request needs to specify the `Content-Type` header for the body. 
With using the pipe `|` character, the JSON-encoded YAML configuration is fed into the curl command call.\n\n```shell\n$ jq --null-input --arg yaml \"$(\u003C.gitlab-ci.error.yml)\" '.content=$yaml' \\\n| curl \"https://gitlab.com/api/v4/ci/lint?include_merged_yaml=true\" \\\n--header 'Content-Type: application/json' --data @-\n{\"status\":\"invalid\",\"errors\":[\"jobs test config should implement a script: or a trigger: keyword\",\"jobs script config should implement a script: or a trigger: keyword\",\"jobs config should contain at least one visible job\"],\"warnings\":[],\"merged_yaml\":\"",[976,9,978],{"slug":2478,"featured":6,"template":686},"devops-workflows-json-format-jq-ci-cd-lint","content:en-us:blog:devops-workflows-json-format-jq-ci-cd-lint.yml","Devops Workflows Json Format Jq Ci Cd Lint","en-us/blog/devops-workflows-json-format-jq-ci-cd-lint.yml","en-us/blog/devops-workflows-json-format-jq-ci-cd-lint",{"_path":2484,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2485,"content":2491,"config":2496,"_id":2498,"_type":14,"title":2499,"_source":16,"_file":2500,"_stem":2501,"_extension":19},"/en-us/blog/devsecops-faq-get-up-to-speed-on-this-hot-devops-area",{"title":2486,"description":2487,"ogTitle":2486,"ogDescription":2487,"noIndex":6,"ogImage":2488,"ogUrl":2489,"ogSiteName":670,"ogType":671,"canonicalUrls":2489,"schema":2490},"DevSecOps FAQ: Get up to speed","There's more to dev, sec and ops than meets the eye, particularly when they're combined. 
Here's what you need to know about DevSecOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669784/Blog/Hero%20Images/security-testing-principles-devs.jpg","https://about.gitlab.com/blog/devsecops-faq-get-up-to-speed-on-this-hot-devops-area","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps FAQ: Get up to speed\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-12-08\",\n      }",{"title":2486,"description":2487,"authors":2492,"heroImage":2488,"date":2493,"body":2494,"category":679,"tags":2495},[851],"2021-12-08","\n\nIf it feels like [DevSecOps](/topics/devsecops/) is just one more flavor of DevOps, we get it. After all, DevOps could be known as _DevSecBizTestMonitorOps_, but that’s not easy to say or remember. DevSecOps actually plays a unique role in the world of software development. Here’s what you need to know.\n\n## Why is DevSecOps important?\n\nAll of the [well-publicized security breaches](/blog/are-you-ready-for-the-newest-era-of-devsecops/) have shown us one thing: Security can no longer be an afterthought in software development. It used to be that security was a separate department and function with a top-down approach and little actual understanding of how software was developed. Code was handed to security late in the process, and then the sec team had to chase busy devs down for fixes. TL;DR: Let’s just say that didn’t ever work well.\n\nToday, DevSecOps aims squarely at the idea that security has to be baked into the process from the beginning. The need for security to [“shift left,”](/blog/efficient-devsecops-nine-tips-shift-left/) i.e., move from production to development, is at the heart of what DevSecOps is. 
\n\nThe data is clear: The earlier a developer finds a flaw, the faster the fix, so DevSecOps puts security scans (and their results) in a dev’s workflow, minimizing the barriers to resolution and greatly decreasing context-switching. \n\nAnd this isn’t just something that’s a nice-to-have – it’s actually happening. In our [2021 Global DevSecOps Survey](/developer-survey), we found DevSecOps teams are running more [SAST](https://docs.gitlab.com/ee/user/application_security/sast/), [DAST](https://docs.gitlab.com/ee/user/application_security/dast/), [container](https://docs.gitlab.com/ee/user/application_security/container_scanning/) and [dependency scans](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) than ever before. And, thanks to DevSecOps, a full 72% of security pros told us their organizations’ security efforts as either “strong” or “good.” \n\n## The difference between DevSecOps and DevOps\n\nDevSecOps *is* DevOps and honestly the terms are, can, and should be used interchangeably. That said, GitLab defines DevOps as [“...people working together to conceive, build and deliver secure software at top speed”](/topics/devops/) and, as you can see, that definition includes security. DevSecOps, on the other hand, “weaves security practices into every stage of software development right through deployment with the use of tools and methods to protect and monitor live applications.”\n\nSome think the term “DevSecOps” puts undue emphasis on security, but we heartily disagree. You can’t emphasize security enough!\n\n## Why is DevSecOps important to business?\n\nThe number one benefit of DevOps is code quality, according to our survey, and, clearly, that’s businesses’ priority as well; bad code costs money literally (time to fix) and figuratively (brand reputation). 
\n\nSo, if it’s [time to convince management to invest in DevSecOps](/blog/devops-stakeholder-buyin/), it’s important to continue to emphasize how devastating a security breach can be.\n\nAlso, it’s vital to connect the dots on exactly how a DevSecOps team can help prevent the worst-case scenarios. From [automated software testing](/blog/devsecops-security-automation/#5-benefits-of-automated-security) to a [security champions program](/blog/why-security-champions/), DevSecOps is one of the most efficient ways to help prevent hacks.\n\n## The future of DevSecOps\n\nThe future of DevSecOps can be summed up in one simple word: more. More testing, more automation, more integration, more shift left, more comprehensive scans… just more of everything that brings security into the development process earlier in the game.\n\nThere are signs that “more” is already happening, based on our 2021 survey results. Nearly 28% of security respondents report they are now part of a cross-functional team and a growing percentage are more focused on compliance. And more than 70% of security pros report their teams shifted left in 2021, up from 65% in 2020. In other words, security is increasingly _on the team._ \n\nAnd don’t forget about the promise of artificial intelligence and machine learning. 
As [AI/ML use expands in DevOps teams](/blog/ai-in-software-development/), DevSecOps will no doubt benefit.\n\n## Ready to learn DevSecOps?\n\nIf you’re ready to dive into DevSecOps, we have a 20 question quiz so you can test your readiness level and learn more.\n",[9,875,813],{"slug":2497,"featured":6,"template":686},"devsecops-faq-get-up-to-speed-on-this-hot-devops-area","content:en-us:blog:devsecops-faq-get-up-to-speed-on-this-hot-devops-area.yml","Devsecops Faq Get Up To Speed On This Hot Devops Area","en-us/blog/devsecops-faq-get-up-to-speed-on-this-hot-devops-area.yml","en-us/blog/devsecops-faq-get-up-to-speed-on-this-hot-devops-area",{"_path":2503,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2504,"content":2510,"config":2515,"_id":2517,"_type":14,"title":2518,"_source":16,"_file":2519,"_stem":2520,"_extension":19},"/en-us/blog/devsecops-platforms-give-smbs-security-muscle",{"title":2505,"description":2506,"ogTitle":2505,"ogDescription":2506,"noIndex":6,"ogImage":2507,"ogUrl":2508,"ogSiteName":670,"ogType":671,"canonicalUrls":2508,"schema":2509},"DevSecOps platforms give SMBs security muscle","A single platform enables teams to build, test, and deploy secure software with fewer resources.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667094/Blog/Hero%20Images/container-security.jpg","https://about.gitlab.com/blog/devsecops-platforms-give-smbs-security-muscle","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps platforms give SMBs security muscle\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-01-10\",\n      }",{"title":2505,"description":2506,"authors":2511,"heroImage":2507,"date":2512,"body":2513,"category":769,"tags":2514},[810],"2023-01-10","\nDevOps professionals with both security training and experience come at a high price and can be hard to find. 
That makes it especially difficult for startups and small and medium-sized businesses (SMBs), which generally don’t have deep pockets, to get the security professionals they need.\n\nSmaller businesses often end up with no security team, so they have to hire consultants. Even worse, they might end up having little to no security help at all, which will cause problems for their customers as well as their own business.\n\nOne efficient [way to deal with that](/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform/) is to adopt a DevSecOps platform, which enables organizations to build, test, and deploy secure software with fewer hands and [expenses](/blog/how-smbs-can-save-with-gitlabs-devops-platform/).\n\n“Someone in an SMB likely doesn’t have enough expertise, or even enough people, in-house to handle every part of DevOps, so they end up having to hire a contractor or consultant to take on things like security and monitoring, and that strains their budget,” says [Fatima Sarah Khalid](https://gitlab.com/sugaroverflow), a developer evangelist at GitLab. “By adopting the GitLab DevSecOps Platform, they can more easily handle this work, despite limited resources.”\n \n## Four benefits for SMBs\n\nSo how does a complete DevSecOps platform add security muscle to a small business? \n\n### 1. Finding vulnerabilities early\n \nWith a single, end-to-end platform, [security is integrated throughout](/stages-devops-lifecycle/secure/), and not just bolted on as an afterthought. With capabilities like dynamic and static application security testing, vulnerability management, and dependency and container scanning, developers can find vulnerabilities earlier in the process when they often can be more easily and quickly fixed. By shifting security left this way, teams can perform threat and vulnerability analysis as developers create the code - not when it’s about to be deployed. 
Shifting security left also creates more secure software, and decreases the time it would have taken to track down a problem created much earlier in the process.\n \n### 2. Easing work with automation\n \nAutomation, which is built into a single DevSecOps platform, is critical because it brings consistency and repeatability to the entire software lifecycle, reducing the potential for human error and minimizing the introduction of bugs and risks. And that enables SMBs to produce more secure software for their own organizations, as well as for their customers.\n \nAnother major advantage of automation is that it minimizes the need for a lot of extra hands-on and time-consuming work, like code reviews and testing. Startups and small businesses, by nature, have smaller DevOps teams. They might even have an IT team of one or two people, who do everything from building software to serving as the help desk. Saving them from having to do repetitive manual work gives them back precious time they can spend on more innovative and productive jobs.\n \nAll of that automated testing is automatically logged and documented, helping organizations create easily searchable and useful best practices that will help speed future software builds.\n \n### 3. Ensuring compliance\n \nSMBs and companies just getting off the ground don’t want to get tripped up by tricky and costly compliance issues. Luckily, the same end-to-end platform enables teams to verify the compliance of their code without leaving their workflow. In GitLab, for example, compliance confirmation lives within the platform and is automated. Developers don't have to context-switch among different point solutions, boosting their productivity and efficiency. Automating compliance also removes one more task from [developers’ already busy schedules](/blog/ease-pressure-on-smb-developers-with-a-devops-platform/).\n \n### 4. 
Establishing security imperatives\n \nA DevSecOps platform gives SMBs speed and efficiency, without requiring them to string together various security tools or hire security consultants. With a platform, because security practices and automation are integrated from the very start, an SMB’s DevOps environment has a solid security foundation. One solution. One answer to security needs.\n\n## Meeting the security need\n \nIn today’s environment, security and compliance are business imperatives. There’s no getting around it.\n\nSo having a strategic, end-to-end platform approach, where security and compliance are embedded from planning to production, provides efficiency and value unmatched by traditional, third-party application security vendors. Companies that may be using DevOps but are only tacking together different tools simply aren’t getting the security advantages that come from a single DevSecOps application.\n\nStartups and SMBs have a steep hill to climb just to survive. Between March 2020 and March 2021, 1 million small businesses opened in the U.S., but 833,458 closed, according to the U.S. Small Business Administration. And in a volatile economic climate, survival gets even tougher. Today’s high inflation rates and market instability have small businesses bracing for economic uncertainties, according to the [MetLife & U.S. Chamber of Commerce Small Business Index](https://www.uschamber.com/sbindex/summary).\n\nAnd those numbers are just about sheer survival. That’s not to mention actually gaining a solid foothold in an organization’s industry, attracting loyal customers, and successfully taking on bigger competitors, which just makes the hill small businesses are climbing even steeper.\n\nPrepare to make that climb easier by migrating to a single, end-to-end platform. 
[Download our SMB-focused ebook](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform-smb.html) learn the advantages to moving from a DIY DevOps toolchain to GitLab’s platform.\n",[9,2243,875,1181],{"slug":2516,"featured":6,"template":686},"devsecops-platforms-give-smbs-security-muscle","content:en-us:blog:devsecops-platforms-give-smbs-security-muscle.yml","Devsecops Platforms Give Smbs Security Muscle","en-us/blog/devsecops-platforms-give-smbs-security-muscle.yml","en-us/blog/devsecops-platforms-give-smbs-security-muscle",{"_path":2522,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2523,"content":2529,"config":2536,"_id":2538,"_type":14,"title":2539,"_source":16,"_file":2540,"_stem":2541,"_extension":19},"/en-us/blog/devsecops-platforms-help-smbs-scale-as-they-grow",{"title":2524,"description":2525,"ogTitle":2524,"ogDescription":2525,"noIndex":6,"ogImage":2526,"ogUrl":2527,"ogSiteName":670,"ogType":671,"canonicalUrls":2527,"schema":2528},"DevSecOps platforms help SMBs scale as they grow","Adopting a comprehensive platform early lets smaller businesses mature with best practices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668641/Blog/Hero%20Images/smbscale.jpg","https://about.gitlab.com/blog/devsecops-platforms-help-smbs-scale-as-they-grow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps platforms help SMBs scale as they grow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-01-17\",\n      }",{"title":2524,"description":2525,"authors":2530,"heroImage":2526,"date":2531,"body":2532,"category":769,"tags":2533},[810],"2023-01-17","\nFor startups and small to medium-sized businesses (SMBs) working to expand their customer base, revenue, and standing in their industries, adopting a [DevSecOps](/topics/devsecops/) platform is one move that can help make all of 
that growth happen. \n\nThe trick is to migrate to a single, end-to-end platform when the organization is small, so bad habits are avoided early on and constructive processes can be built in and scale as the business grows. A DevSecOps platform enables small businesses to set up an environment and work processes that help them avoid common pitfalls that can come with growth.\n\n## How DevSecOps platforms help SMBs scale\n\nHere are a few ways a DevSecOps platform can help smaller businesses and startups scale:\n\n### Reducing complexity\n\nWhen someone is on a small IT team, the last thing they need is something complicating their job and taking up their precious time. And if they are stitching together multiple tools, they end up creating a [clumsy, ad-hoc toolchain](/blog/battling-toolchain-technical-debt/). That by its very nature forces DevOps professionals to wrestle with a chaotic environment that leads to bottlenecks and requires constant management, tweaking, updating, and switching between interfaces. All of that toolchain care and feeding comes at the expense of simply focusing on delivering code that drives the organization’s bottom line. \n\n### Avoiding silos\n\nMaybe a company is small enough that silos aren’t a problem... right now. But as the business grows, silos likely will grow along with it, causing problems. Silos mean people are heads down working on their own project, or even worse, their own part of a project, without any visibility into the rest of it, or the ability to comment or share their work. It’s easy to create silos if you’re not using a DevSecOps platform because people often naturally separate off into single-minded groups that do not communicate with or understand each other. DevSecOps platforms foster collaboration, making it easier to keep silos from forming in the first place. They create a working environment open to communication and collaboration. 
A platform will give people the ability to work together, and that collective effort will produce better software. \n\n### Increasing collaboration\n\nAdopting a single, end-to-end platform when a company is small or when a startup is just getting off the ground will enable and encourage everyone in the business (from IT to finance, marketing, and sales) to work together. And it’s easier to create [a collaborative culture](/blog/why-devops-collaboration-continues-to-be-important/) from the very beginning, when working together can become a habit – a normal means of operation. Instilling an environment of communication also is less disruptive and easier to manage in a company of 10, 25, or even 100 employees than in a much larger and complex business. Collaboration also will encourage innovation by bringing in ideas from people in a range of demographics and business interests. Innovative ideas will help businesses grow into more successful and larger companies.\n\n### Decreasing hands-on work\n\nBecause startups and SMBs have fewer IT people, let alone teams of DevOps professionals, the [automation](/blog/how-automation-is-making-devops-pros-jobs-easier/) that is an integral part of a DevSecOps platform eases their burden by decreasing the amount of hands-on work they have to do. With automation for jobs like backup, installation, and security testing built in, people spend less of their already-limited time needlessly repeating time-consuming tasks, or going back in the software lifecycle to find where a security bug was introduced. Automating tasks required for everything from design to build, test, and deployment also can reduce the potential for human error and provide consistency throughout the software lifecycle. By taking those jobs off DevSecOps teams' plates, they have more time to actually build and deploy innovative software and support the business. \n\nLet’s be clear: A startup or SMB isn’t too small for a DevSecOps platform. 
If an organization is building software, it needs a platform. Business executives don’t want to struggle to grow and look back regretfully and think, “Why didn’t I adopt a DevSecOps platform earlier?”\n\n“If you’re on a small team or even just a team of one, migrating could seem like a lot to take on,” says [Fatima Sarah Khalid](/company/team/#sugaroverflow), a developer evangelist at GitLab. “But it’s worth the effort to set yourself up for growth. With a platform, everyone in the company is able to work in the same environment on the same projects. That means a collaborative environment without silos is formed early and the business can grow with that culture, instead of trying to adopt it years down the road when bad work habits have already formed.”\n\nWith GitLab’s single, end-to-end DevSecOps platform, automation is a system feature and not something that has to be added in. It also helps organizations eliminate or even keep silos from forming, increases collaboration and communication, and decreases the complexities that are born of DIY toolchains.\n\n**Download our [ebook](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform-smb.html)** to learn about the benefits of migrating from a toolchain to GitLab’s DevSecOps platform. 
\n\n_Cover image by [Markus Spiske](https://unsplash.com/de/@markusspiske?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com)_\n",[9,2534,1731,2535],"DevOps platform","growth",{"slug":2537,"featured":6,"template":686},"devsecops-platforms-help-smbs-scale-as-they-grow","content:en-us:blog:devsecops-platforms-help-smbs-scale-as-they-grow.yml","Devsecops Platforms Help Smbs Scale As They Grow","en-us/blog/devsecops-platforms-help-smbs-scale-as-they-grow.yml","en-us/blog/devsecops-platforms-help-smbs-scale-as-they-grow",{"_path":2543,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2544,"content":2550,"config":2556,"_id":2558,"_type":14,"title":2559,"_source":16,"_file":2560,"_stem":2561,"_extension":19},"/en-us/blog/devsecops-security-automation",{"title":2545,"description":2546,"ogTitle":2545,"ogDescription":2546,"noIndex":6,"ogImage":2547,"ogUrl":2548,"ogSiteName":670,"ogType":671,"canonicalUrls":2548,"schema":2549},"Automated security testing for DevSecOps","We share four fool-proof ways to bring your security automation to the next level and five reasons why it's critical.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662504/Blog/Hero%20Images/devsecops-automated-security.jpg","https://about.gitlab.com/blog/devsecops-security-automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automated security testing for DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2020-07-08\",\n      }",{"title":2545,"description":2546,"authors":2551,"heroImage":2547,"date":2552,"body":2553,"category":679,"tags":2554},[1016],"2020-07-08","\n\n_This is the third in our five-part series on getting started with [DevSecOps](/topics/devsecops/). Part one gives you nine ways to [shift security left](/blog/efficient-devsecops-nine-tips-shift-left/). 
Part two outlines the steps needed to create [silo-free collaboration](/blog/achieve-devsecops-collaboration/)._\n\nNearly 83% of developers in [GitLab’s 2020 DevSecOps survey](/developer-survey/) say they’re releasing code faster today than ever before thanks to [DevOps](/topics/devops/). About 65% also say security is shifting left in their organizations. How far left is that shift? Not that far: Over 60% of developers don’t actually run static [application security](/topics/devsecops/) testing (SAST) scans, and 73% don’t conduct dynamic application security testing (DAST) scans.\n\nThis needs to change.\n\nSecurity is often a bottleneck to faster releases but it is much too risky to minimize or ignore. DevSecOps promises to bring security  forward in the software development lifecycle (SDLC). This can be done a number of ways but automated security testing streamlines adoption and scalability. A respondent to this year’s DevSecOps Survey summarized it nicely:\n\n> Automated testing and continuous integration have made our deployments safer and more optimized. Now everyone in the team has the permission to deploy the code.\n\n## The need for security automation and good security practices\n\nThere is an attempted cyber-attack [every 44 seconds](https://us.norton.com/blog/emerging-threats/cybersecurity-statistics#:~:text=There%20isn't%20concise%20data,people%20being%20hacked%20per%20year.) on average. \n\n_Every. 44. Seconds._ \n\nThis also equates to approximately 2,200 daily attacks resulting in about 800,000 people being hacked each year. Unfortunately, no one has the time, patience, or bandwidth to keep their eyes and hands ready to stop or address cyber attacks on the horizon. That’s why security automation tools exist.\n\nAnd consider this: cyber attackers aren’t doing everything by hand – they employ automation too. 
This means security processes also [need automation to keep up](https://www.checkpoint.com/cyber-hub/cyber-security/security-automation/#:~:text=Security%20automation%20is%20the%20automation,scale%20to%20handle%20growing%20workloads.). \n\nA security automation solution can include real-time monitoring tools that constantly manage security vulnerabilities and take automatic action where needed. It’s like adding a second pair of invisible hands to the team to help prevent and resolve security issues. Increased security measures can save any organization time and money and avoid the loss of sensitive files. \n\n\n## 4 Ways to automate security in software development\n\n[Automation](https://docs.gitlab.com/ee/topics/autodevops/) comes in all shapes and sizes. Scans and policies can be programmed manually or come as set operations out of the box; scans can be triggered automatically at code commit or manually initiated; and these scans can result in automated remediation and reports or they can require human intervention. Here are four ways automated security testing can be integrated into your software development practices:\n\n1. Automate security scans for every code change by [running SAST scans](https://docs.gitlab.com/ee/user/application_security/sast/index.html). For ease of assessment, results should be sorted by the priority level of the vulnerability.\n\n1. Scan results should automatically initiate a work ticket or issue, or may stop a build depending on the policy in place. These results should be presented to the developer – in the workspace or IDE in use to avoid context switching – for instant remediation.\n\n1. Policies are automatically applied upon code commit with the option to capture and approve exceptions as needed.\n\n1. Analyze running web applications for known vulnerabilities [using DAST scans](https://docs.gitlab.com/ee/user/application_security/dast/). 
In GitLab, DAST scans can be automated by [including the CI job in your existing .gitlab-ci.yml file](https://docs.gitlab.com/ee/user/application_security/dast/#configuration), or by [using Auto DAST](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-dast).\n\n\n\n## 5 Benefits of automated security\n\nIn addition to making jobs easier across development, security, and operations, automated security testing will help your team produce a safer and better-quality result.\n\n1. **Reduced human error.** Across all functions, automation reduces human error by taking the manual work out of tedious processes that rely on excessive attention to detail.\n\n1. **Early security intervention.** By placing security earlier in the SDLC, threats and vulnerabilities can be detected and addressed faster – hopefully before there’s even a chance that they’re exposed.\n\n1. **Streamlined vulnerability triage.** Automated scan reports can present the threat level of any vulnerability so that developers and security engineers alike can decide which must be addressed immediately and who is responsible for resolving the problem.\n\n1. **Repeatable security checks.** Any automated task should be repeatable, which means that all code can be reviewed and assessed the same way every time. This creates a trusted and secure environment and code base, and also helps reviewers identify patterns when results are presented in a consistent manner.\n\n1. **Responsibility clarification.** Automation takes uncertainty out of DevSecOps. Shifting security can cause confusion about who is responsible for what. But automated scans can present remediation options for the party responsible _at that stage of development_.\n\nBut it is also important to find a productive balance between automated security testing and manual work. 
For example, trying to automate overly rigorous policies may prove detrimental to business objectives and may not be realistically achieved – it’s important to find a balance between policy compliance and efficiency. It’s also key that automation doesn’t obstruct visibility. Make sure there is still a trail of operations to review if necessary – automated processes should still generate reports of what was done, when, and why the action was triggered. Last, but certainly not least: Automation is **not** meant to replace human beings. It is a tool meant to make their work more efficient and help them produce better results for the team, the business, and the customer.\n\n## Security automation vs. security orchestration\n\nThough they are different concepts, security automation and security orchestration perform similar functions. One serves the other to make security processes more efficient. \n\nSecurity automation focuses on automating individual tasks (possibly with AI technology) to simplify essential processes for security analysts. On the flip side, security orchestration connects tools in use alongside automation and streamlines the whole security procedure. Orchestration drives efficient automation.\n\n## Types of security automation tools\n\nTo keep track of security incidents (and prevent them in the future), teams use security automation tools and different types of security scanning. A few common types of security automation tools include:\n\n- Security Information and Event Management (SIEM): SIEMs help to automatically collect data across multiple sources and use it to give contextual background about security incidents.\n- Security Orchestration, Automation, and Response (SOAR): SOAR takes SIEM a step further than just contextual data collection and adds automated response options to the mix. SOAR alerts security analysts to problems and shuts down cyber threats automatically. 
\n- Extended Detection and Response (XDR): This proactive, automated solution combines SIEM, SOAR, and other security options into one managed source.\n\n## How security automation works with security analysts\n\nA human can’t do all of the necessary security work, nor can a security automation tool. It’s a symbiotic relationship to ensure that an organization feels the least amount of negative impact from a cyber attack possible. \n\nA security analyst, responsible for vulnerability management by identifying and resolving security flaws and conducting [audits](https://about.gitlab.com/blog/what-you-need-to-know-about-devops-audits/), gets a lot of help from automation. An automated security system can make someone aware of a problem and even help to resolve it while removing manual time constraints.\n\n**Read more about DevSecOps:**\n* [Efficient DevSecOps: 9 tips for shifting left](https://about.gitlab.com/blog/efficient-devsecops-nine-tips-shift-left/)\n* [Want better DevSecOps? Try cross-functional collaboration](https://about.gitlab.com/blog/achieve-devsecops-collaboration/)\n* [Compliance made easy with GitLab](https://about.gitlab.com/blog/compliance-made-easy/)\n* [How application security engineers can use GitLab to secure their projects](https://about.gitlab.com/blog/secure-stage-for-appsec/)\n\nCover image by [Daniele Levis Pelusi](https://unsplash.com/@yogidan2012) on [Unsplash](https://unsplash.com/photos/Pp9qkEV_xPk)\n{: .note}\n\n\n\n",[9,875,683,2555],"zero trust",{"slug":2557,"featured":6,"template":686},"devsecops-security-automation","content:en-us:blog:devsecops-security-automation.yml","Devsecops Security 
Automation","en-us/blog/devsecops-security-automation.yml","en-us/blog/devsecops-security-automation",{"_path":2563,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2564,"content":2570,"config":2575,"_id":2577,"_type":14,"title":2578,"_source":16,"_file":2579,"_stem":2580,"_extension":19},"/en-us/blog/devsecops-security-standardization",{"title":2565,"description":2566,"ogTitle":2565,"ogDescription":2566,"noIndex":6,"ogImage":2567,"ogUrl":2568,"ogSiteName":670,"ogType":671,"canonicalUrls":2568,"schema":2569},"DevSecOps basics: 5 steps to standardize (and then scale) security","DevSecOps is incomplete without speed and scale. Standardize security to make it happen.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663613/Blog/Hero%20Images/devsecops-security-standardization.jpg","https://about.gitlab.com/blog/devsecops-security-standardization","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps basics: 5 steps to standardize (and then scale) security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2020-07-20\",\n      }",{"title":2565,"description":2566,"authors":2571,"heroImage":2567,"date":2572,"body":2573,"category":679,"tags":2574},[1016],"2020-07-20","\n_This is the fifth in our five-part series on [DevSecOps](/topics/devsecops/) basics. Part one offers nine tips to truly [shift left](https://about.gitlab.com/blog/efficient-devsecops-nine-tips-shift-left/). Part two outlines the steps needed to create [silo-free collaboration](https://about.gitlab.com/blog/achieve-devsecops-collaboration/). Part three looks at the importance of [automated security testing](https://about.gitlab.com/blog/devsecops-security-automation/). 
And part four details how to create a [strong security culture](https://about.gitlab.com/blog/security-culture-devsecops/)._\n\nStandardizing security policies comes in a variety of forms: regulatory compliance, access controls, acceptable use policies, security as code, and automation, to name a few. Ultimately, the idea is that your security team knows exactly what policies and methods have been used or applied to each project. \n\nThe goals of standardization are consistency, traceability, and repeatability. By consistently using the same security methods across all work, security knows what has been protected and what hasn’t. This helps them apply additional measures where necessary, and makes them aware of any needed exceptions. Ensuring that security methods are repeatable helps to expand adoption and scale security to the entire organization or enterprise. \n\n## Building a standardized security program\n\nA holistic security program should be composed of different levels of policies and compliance. Some policies should be company-wide, such as an [acceptable use policy](https://whatis.techtarget.com/definition/acceptable-use-policy-AUP), some will fulfill regulations like the [GDPR](https://gdpr-info.eu/) or [CCPA](https://oag.ca.gov/privacy/ccpa), and some will be specific to certain organizations within your business. \n\n### Standardizing security in DevOps\n\n[DevSecOps can be executed sustainably](/solutions/security-compliance/) at scale with standardized security practices. Here are five ways to standardize security across all of your development projects.\n\n#### Educate\n\nProvide security training and education to every employee. Companywide security initiatives [help to build a security culture](https://about.gitlab.com/blog/security-culture-devsecops/) and empower employees to take responsibility for security in their own work. 
Standardized training also spreads awareness of mandatory policies and alerts employees to the actions taken to both secure day-to-day operations and protect their customers. \n\n#### Coordinate\n\nCoordinate a predefined set of security requirements among dev, sec, and ops that can be coded into your pipeline and applied to every project. These can ensure regulatory compliance, foster secure coding practices, trigger red flags or notifications, and educate employees on security best practices.\n\n#### Authenticate\n\nAccess controls are a critical component of any security framework, and should be continually monitored and evaluated. By keeping close tabs on who needs access to what, you’re able to build a solid wall around your most critical processes and assets. This eliminates unnecessary access to sensitive data, and helps streamline tracing, recovery, and remediation efforts when something goes wrong. Access control policies also help defend your business by enhancing authentication requirements.\n\n#### Integrate\n\nEmbed scan and test tools within your development pipeline. Static and dynamic application security testing (SAST and DAST, respectively) can be set to run at every code commit and in the review app. Other tools and tests include IAST, fuzzing, licence compliance, container scanning, and dependency scanning (among others). Embedding tools directly into the pipeline allows you to know exactly what the code has been evaluated for, and also what the code has not been checked for. \n\n#### Automate\n\nIn DevSecOps, automation is the true key to standardized security practices as it allows for fast, secure development at scale. There are a number of ways to automate security within and around your development pipeline – the trick is to find an appropriate balance between automation and manual intervention. 
Automated policies should serve as guardrails that guide development smoothly from one security check to the next, but they should also allow for exceptions when needed. These guardrails should automatically generate reports from code scans and consolidate them into a [security dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/) for review. This helps to minimize human error and any false positives or negatives, allows for consistent vulnerability reporting, and can be used to measure a team’s performance against secure coding expectations. Automation also helps to prevent overly complex security programs by reducing ad-hoc policies and redundant work.\n\n## The best security programs will change\n\nSecurity will never be a set-it-and-forget-it practice. The threat landscape is constantly changing, external regulations will continue to evolve, and internal business requirements will always keep you on your toes. While setting standards for security will help your team manage the workload, these standards need to be constantly re-evaluated and updated. Outdated security practices will undermine even the most solid programs, so it’s important to use part of the time saved from standardizing and automating to plan for the future. \n\n_How efficient are your DevSecOps practices? 
[Take our DevSecOps Maturity Assessment to find out.](https://about.gitlab.com/resources/devsecops-methodology-assessment/)_\n\n**Learn more about DevSecOps:**\n* [Case Study: How Jasper Solutions offers “DevSecOps in a box” with GitLab”](https://about.gitlab.com/customers/jasper-solutions/)\n* [How to capitalize on GitLab Security tools with external CI](https://docs.gitlab.com/ee/integration/jenkins.html)\n* [How to overcome toolchain security challenges with GitLab](https://about.gitlab.com/blog/toolchain-security-with-gitlab/)\n\nCover image by [Andrew Ridley](https://unsplash.com/@aridley88) on [Unsplash](https://unsplash.com/photos/jR4Zf-riEjI)\n{: .note}\n",[9,875,683,2555],{"slug":2576,"featured":6,"template":686},"devsecops-security-standardization","content:en-us:blog:devsecops-security-standardization.yml","Devsecops Security Standardization","en-us/blog/devsecops-security-standardization.yml","en-us/blog/devsecops-security-standardization",{"_path":2582,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2583,"content":2588,"config":2593,"_id":2595,"_type":14,"title":2596,"_source":16,"_file":2597,"_stem":2598,"_extension":19},"/en-us/blog/devsecops-survey-released",{"title":2584,"description":2585,"ogTitle":2584,"ogDescription":2585,"noIndex":6,"ogImage":1801,"ogUrl":2586,"ogSiteName":670,"ogType":671,"canonicalUrls":2586,"schema":2587},"Our 2020 DevSecOps Survey found faster releases and changing roles","Nearly 3700 software pros shared their DevOps successes, failures and thoughts on the future. 
Here’s what you need to know.","https://about.gitlab.com/blog/devsecops-survey-released","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Our 2020 DevSecOps Survey found faster releases and changing roles\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-05-18\",\n      }",{"title":2584,"description":2585,"authors":2589,"heroImage":1801,"date":2590,"body":2591,"category":726,"tags":2592},[851],"2020-05-18","\n_Our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nIn February 2020, nearly 3700 DevOps practitioners from 21 countries shared, often in their own words, the reality of their software development journeys. They told us DevOps works for them: Nearly 83% said they’re releasing code faster and about 60% are deploying code either multiple times a day, daily, or every other day. But they also offered details of a less obvious but perhaps more important shift – their roles are changing, in some cases dramatically, because of DevOps.\n\nAlthough this survey was completed before today’s unprecedented economic upheaval, we think the insights in our [2020 Global DevSecOps Survey](/developer-survey/) may help you get a greater understanding of real world DevOps and the way job responsibilities are changing for developers, security pros, operations team members, and testers.\n\n## Dev + Ops\n\nWhy are developers releasing code more quickly with DevOps? For starters, they’re adding some of the key DevOps components including CI, SCM, automated testing, and CD.\n\n_\"Pre-deployment tests have provided more confidence that the product is ready to be released, also delivery frequency has increased.\"_\n\nBut the technology changes only tell part of the story. 
Traditional operations-type duties like provisioning or maintaining environments are increasingly part of development responsibilities. Over 34% of developers say they define and/or create the infrastructure their app runs on.\n\n_\"Deployment has become a non-task. Bootstrapping new projects is 10x faster because of the reusable infrastructure.\"_\n\nDevelopers say they’re no longer doing lots of hands-on tasks – like manual testing, deployments or merging – but they are increasingly responsible for security. In fact 28% say they’re now solely responsible for security in their organizations, a clear sign that security is beginning to \"shift left\" in a material way.\n\n_\"Security varies project to project. DevOps is usually tasked with 'protecting' our environments. We devs try to follow industry standards code-wise.\"_\n\n## An uneasy alliance\n\nAlthough security remains a work in progress at many if not most organizations, there are a few signs that [DevSecOps](/solutions/security-compliance/) is actually happening. Security professionals report that they are (finally) part of cross-functional teams and are working more closely with developers than ever before.\n\n_\"(Security) is becoming less focused into silo positions and more of a jack of all trades role.\"_\n\nIn fact 65% of security teams say their organizations have \"shifted left\" though, when we drilled down to find out what that actually meant, the details became much less clear. Fewer than 19% put SAST scan results into a pipeline report a developer can access and dynamic application security testing (DAST) fares even worse – less than 14% of companies give developers access to those reports.\n\nAt the same time, security teams continue to report that developers don't find enough bugs early enough in the process and/or that they’re reluctant to fix them when they are discovered.\n\nTo add to the confusion, 33% of security pros say they’re solely responsible for security in their organizations. 
But nearly the same percentage – 29% – say *everyone* is responsible. The ideal, of course, is what was shared by one survey taker:\n\n_\"We don’t have separate security, developers and operations; we are DevSecOps (and more).\"_\n\n## In the clouds\n\nOperations is often the place where the proverbial rubber hits the road and that’s particularly true with DevOps. In fact over 60% of operations team members report their roles are changing thanks to DevOps.\n\nWhat do these new roles look like?\n\n_\"Ops is 60% new project work and 40% operations/fire-fighting/developer support.\"_\n\n_\"We ensure reliability and availability, improve developer efficiency, automation, tools, and observability.\"_\n\n_\"We keep the lights on.\"_\n\n_\"(Ops today is) anything between dev and ops. From planning to deployment but not monitoring and maintaining apps in production.\"_\n\nToday 42% of operations team members see their role as primarily managing hardware and infrastructure, while 52% say their first priority is managing cloud services.\n\n## The trouble with test\n\nFor the second year in a row our survey takers have pointed squarely to testing as the number one reason releases are delayed. Last year 49% said test was at fault; this year it was 47%.\n\nBut there are small signs of change. Almost three-quarters of organizations report they have shifted testing left, meaning they’ve moved it earlier into the development process. What does that actually mean? Approximately 31% said developers test some of their code and 25% said automated testing happens as code is being written. About 17% said dev and test work as a team to test \"as close to real time as possible,\" and about 9% said they practice test-driven development (TDD).\n\n_\"We do TDD. QA and dev act as a team. We have automated tests running parallel with developing code.\"_\n\nLike security, testers say they are now much more involved in the development process. 
Nearly 30% said they’re working more closely with developers, and 16% said they have \"a more visible seat at the table.\" And just over 15% said that thanks to DevOps, they’re much more likely to be able to \"test what matters.\"\n\n_\"We have to write less paper and tickets and have faster reaction times.\"_\n\n_\"We’re all the same – dev team is the ops team.\"_\n\n_\"We’re starting to see light at the end of the tunnel.\"_\n\n## Looking forward\n\nOur respondents had a big list of areas they hope to focus on for the future from automation to CI/CD and even going more deeply into DevOps. DevOps and lifelong learning clearly go hand in hand.\nBut let’s end on a high note. We asked developers how prepared they are for the future: 71% said prepared or very prepared, while less than 25% said \"not very prepared.\" But we like this comment left from one developer, who has the lifelong learning baked in:\n\n_\"I’m only prepared because I constantly keep tinkering on the side.\"_\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) has the latest insights from over 5,000 DevOps professionals. 
You can also compare it with [previous year surveys](/developer-survey/previous/)_\n",[9,681,109],{"slug":2594,"featured":6,"template":686},"devsecops-survey-released","content:en-us:blog:devsecops-survey-released.yml","Devsecops Survey Released","en-us/blog/devsecops-survey-released.yml","en-us/blog/devsecops-survey-released",{"_path":2600,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2601,"content":2607,"config":2612,"_id":2614,"_type":14,"title":2615,"_source":16,"_file":2616,"_stem":2617,"_extension":19},"/en-us/blog/docker-hub-rate-limit-monitoring",{"title":2602,"description":2603,"ogTitle":2602,"ogDescription":2603,"noIndex":6,"ogImage":2604,"ogUrl":2605,"ogSiteName":670,"ogType":671,"canonicalUrls":2605,"schema":2606},"How to make Docker Hub rate limit monitoring a breeze","Docker Hub Rate Limits are enforced and we need to find ways to monitor the remaining pull requests. Explore some ways to create a monitoring plugin for Nagios/Icinga/Sensu/Zabbix and test-drive a new Prometheus exporter in combination with Grafana.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681749/Blog/Hero%20Images/vidarnm-unsplash.jpg","https://about.gitlab.com/blog/docker-hub-rate-limit-monitoring","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make Docker Hub rate limit monitoring a breeze\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2020-11-18\",\n      }",{"title":2602,"description":2603,"authors":2608,"heroImage":2604,"date":2609,"body":2610,"category":791,"tags":2611},[2473],"2020-11-18","\n\nWhen we learned about the [Docker Hub Rate Limit](/blog/mitigating-the-impact-of-docker-hub-pull-requests-limits/), we thought about ways to mitigate and analyse the new situation. 
Container images are widely used and adopted for sandbox environments in [CI/CD pipelines](/solutions/continuous-integration/) and cloud-native production environments with app deployment in [Kubernetes clusters](/solutions/kubernetes/).\n\n## What is meant by Docker Hub limits?\n\nEach `docker pull` request toward the central `hub.docker.com` container registry is being counted. When a defined limit is reached, future requests are blocked and might be delayed into the next free window. [CI/CD](/topics/ci-cd/) jobs cannot be executed anymore after receiving a HTTP error `429 - too many requests` and similar errors will be seen in production deployment logs for Kubernetes.\n\nDocker defines this limit with 100 anonymous requests every six hours for the client's source IP address. If you have multiple container deployments behind an IP address, for example a company DMZ using a NAT, this limit can be reached very fast. A similar problem happens with watchtower tools which try to keep your container images updated, for example on your self-managed GitLab Runner. The limit can be raised by logging in, and by getting a paid subscription.\n\nThe question is: Where can you see the current limit and the remaining pull requests?\n\n### How to check the Docker Hub request limit?\n\nThe [Docker documentation](https://docs.docker.com/docker-hub/download-rate-limit/#how-can-i-check-my-current-rate) suggests to use CLI commands which invoke `curl` HTTP requests against the Docker Hub registry and parse the JSON response with [jq](https://stedolan.github.io/jq/).\n\nDefine the `IMAGE` variable once for the following CLI commands to use:\n\n```shell\n$ IMAGE=\"ratelimitpreview/test\"\n```\n\nObtain a token for authorization. 
Optionally print the variable value to verify its content.\n\n```shell\n$ TOKEN=$(curl \"https://auth.docker.io/token?service=registry.docker.io&scope=repository:$IMAGE:pull\" | jq -r .token)\n\n$ echo $TOKEN\n```\n\nThe next step is to simulate a `docker pull` request. Instead of using `GET` as HTTP request method, a `HEAD` request is sent which does not count toward the rate limit. The response headers contain the keys `RateLimit-Limit` and `RateLimit-Remaining`.\n\n```shell\n$ curl --head -H \"Authorization: Bearer $TOKEN\" https://registry-1.docker.io/v2/$IMAGE/manifests/latest\n```\n\nThe limit in the example is `2500` with remaining `2495` pull requests. `21600` defines the limit time window as six hours.\n\n```\nRateLimit-Limit: 2500;w=21600\nRateLimit-Remaining: 2495;w=21600\n```\n\n`RateLimit-Reset` can be returned too, this will be the remaining time until the limits are reset.\n\n### Create a monitoring script\n\nThe CLI commands can be turned into a programming language of your choice which provides methods for HTTP requests and better response parsing. The algorithm needs to follow these steps:\n\n* Obtain an authorization token from Docker Hub. Username/password credentials can be optionally provided, otherwise the request happens anonymously.\n* Send a `HEAD` request to the Docker Hub registry and simulate a `docker pull` request\n* Parse the response headers and extract the values for `RateLimit-Limit` and `RateLimit-Remaining`\n* Print a summary of the received values\n\nA plugin script which can be used by Nagios/Icinga/Sensu/Zabbix and others has additional requirements. It needs to implement the [Monitoring Plugins API specification](https://www.monitoring-plugins.org/doc/guidelines.html):\n\n* Print the limit and remaining count\n* Calculate a state: Ok, Warning, Critical, Unknown and print a helpful text on the shell\n* Add optional warning/critical thresholds for the remaining count. 
Whenever the count is lower than the threshold, the state changes to Warning/Critical and the exit code changes: `OK=0, Warning=1, Critical=2, Unknown=3`\n* Collect limit values as performance metrics for graphing and visualization\n* Add verbose mode and timeout parameters as plugin development best practices. If Docker Hub does not respond within 10 seconds as default, the plugin exits and returns `Unknown` as state.\n\nYou can download the [check_docker_hub_limit.py plugin script](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit) and integrate it into your monitoring environment.\n\n#### Use the monitoring plugin script\n\nThe [check_docker_hub_limit.py plugin script](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit) plugin is written in Python 3 and requires the `requests` library. Follow the [installation instructions](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit#installation) and run the plugin script with the `--help` parameter to see all available options:\n\n```\n$ python check_docker_hub_limit.py --help\n\nusage: check_docker_hub_limit.py [-h] [-w WARNING] [-c CRITICAL] [-v] [-t TIMEOUT]\n\nVersion: 2.0.0\n\noptional arguments:\n  -h, --help            show this help message and exit\n  -w WARNING, --warning WARNING\n                        warning threshold for remaining\n  -c CRITICAL, --critical CRITICAL\n                        critical threshold for remaining\n  -v, --verbose         increase output verbosity\n  -t TIMEOUT, --timeout TIMEOUT\n                        Timeout in seconds (default 10s)\n```\n\nRun the script to fetch the current remaining count. 
The plugin script exit code returns `0` being OK.\n\n```\n$ python3 check_docker_hub_limit.py\nOK - Docker Hub: Limit is 5000 remaining 4997|'limit'=5000 'remaining'=4997\n\n$ echo $?\n0\n```\n\nSpecify the warning threshold with `10000` pulls, and the critical threshold with `3000`.\nThe example shows how the state changes to `WARNING` with a current count of `4999` remaining\npull requests. The plugin script exit code changes to `1`.\n\n```\n$ python3 check_docker_hub_limit.py -w 10000 -c 3000\nWARNING - Docker Hub: Limit is 5000 remaining 4999|'limit'=5000 'remaining'=4999\n\n$ echo $?\n1\n```\n\nSpecify a higher critical threshold with `5000`. When the remaining count goes below this value,\nthe plugin script returns `CRITICAL` and changes the exit state into `2`.\n\n```\n$ python3 check_docker_hub_limit.py -w 10000 -c 5000\nCRITICAL - Docker Hub: Limit is 5000 remaining 4998|'limit'=5000 'remaining'=4998\n\n$ echo $?\n2\n```\n\nWhen a timeout is reached, or another error is thrown, the exit state switches to `3` and the output state becomes `UNKNOWN`.\n\n### Use a Prometheus exporter for rate limit metrics\n\n[Prometheus](https://prometheus.io/) scrapes metrics from HTTP endpoints. There is a variety of exporters for Prometheus to monitor host systems, HTTP endpoints, containers, databases, etc. Prometheus provides [client libraries](https://prometheus.io/docs/instrumenting/clientlibs/) to make it easier to start writing your own custom exporter. The metrics need to be exported in a [defined format](https://prometheus.io/docs/instrumenting/exposition_formats/).\n\nThe Docker Hub limit values can be fetched with obtaining an authorization token first, and then sending a `HEAD` request shown above. The code algorithm follows the ideas of the monitoring plugin. Instead of printing the values onto the shell, the metric values are exposed with an HTTP server. 
The Prometheus client libraries provide this functionality built-in.\n\nWe have created a [Prometheus Exporter for Docker Hub Rate Limits](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/docker-hub-limit-exporter) using the [Python client library](https://github.com/prometheus/client_python). The repository provides a demo environment with `docker-compose` which starts the exporter, Prometheus and Grafana.\n\nEnsure that [docker-compose is installed](https://docs.docker.com/compose/install/) and clone/download the repository. Then run the following commands:\n\n```\n$ cd example/docker-compose\n\n$ docker-compose up -d\n```\n\nNavigate to `http://localhost:3030` to access Grafana and explore the demo environment with the pre-built dashboard.\n\n![Grafana dashboard for Docker Hub Limit Prometheus Exporter](https://about.gitlab.com/images/blogimages/docker-hub-limit-monitoring/grafana_prometheus_docker_hub_limit_exporter_demo.png){: .shadow.medium.center}\n\nGrafana dashboard for Docker Hub Limits\n{: .note.text-center}\n\n### More monitoring/observability ideas\n\nUse the steps explained in this blog post to add Docker Hub limit monitoring. Evaluate the Prometheus exporter or the check plugin, or create your own monitoring scripts. Fork the repositories and send a MR our way!\n\n* [check-docker-hub-limit for Nagios/Icinga/Zabbix/Sensu](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit)\n* [docker-hub-limit-exporter for Prometheus](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/docker-hub-limit-exporter)\n\nThe Prometheus exporter and the monitoring plugin script can help to see trends and calculate usage over time. 
Use your own local (GitLab) container registry or one of the available caching methods described in these blog posts:\n\n* [Cache Docker images in your CI/CD infrastructure](/blog/mitigating-the-impact-of-docker-hub-pull-requests-limits/). Use this resource for possible solutions around caching and proxying.\n* [Use the Dependency Proxy](/blog/minor-breaking-change-dependency-proxy/). Learn more about the GitLab Dependency Proxy being made open source in the future.\n* [#everyonecancontribute cafe: Docker Hub Rate Limit: Mitigation, Caching and Monitoring](https://everyonecancontribute.com/post/2020-11-04-cafe-7-docker-hub-rate-limit-monitoring/). This is a community meetup hosted by Developer Evangelists at GitLab. The blog post includes a video with more insights and discussion.\n\nPhoto by [Vidar Nordli-Mathisen](https://unsplash.com/@vidarnm) from [Unsplash](https://www.unsplash.com).\n{: .note}\n",[1041,9,1477,682,1339],{"slug":2613,"featured":6,"template":686},"docker-hub-rate-limit-monitoring","content:en-us:blog:docker-hub-rate-limit-monitoring.yml","Docker Hub Rate Limit Monitoring","en-us/blog/docker-hub-rate-limit-monitoring.yml","en-us/blog/docker-hub-rate-limit-monitoring",{"_path":2619,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2620,"content":2625,"config":2630,"_id":2632,"_type":14,"title":2633,"_source":16,"_file":2634,"_stem":2635,"_extension":19},"/en-us/blog/dont-confuse-these-twelve-shortcuts-with-iteration",{"title":2621,"description":2622,"ogTitle":2621,"ogDescription":2622,"noIndex":6,"ogImage":2055,"ogUrl":2623,"ogSiteName":670,"ogType":671,"canonicalUrls":2623,"schema":2624},"Don’t confuse these 12 shortcuts with iteration","Iteration is a GitLab value. 
Sid Sijbrandij, GitLab’s co-founder and CEO, discusses 12 shortcuts that are not iterations to help refine what is considered a good iteration.","https://about.gitlab.com/blog/dont-confuse-these-twelve-shortcuts-with-iteration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Don’t confuse these 12 shortcuts with iteration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2021-12-01\",\n      }",{"title":2621,"description":2622,"authors":2626,"heroImage":2055,"date":2627,"body":2628,"category":769,"tags":2629},[1609],"2021-12-01","\n\n[Iteration](https://handbook.gitlab.com/handbook/values/#iteration) is one of GitLab’s [top 3 values](https://handbook.gitlab.com/handbook/values/#hierarchy) because it enables everyone to be efficient in how they deliver value to customers and the wider community. Iteration helps us [build our product and improve our work lives](/blog/power-of-iteration/). \n\nIn essence, iteration is reducing the scope of your next piece of work to the smallest valuable thing possible so that you can deliver it fast. By reducing the scope and delivering to the user, rather than holding it back while finishing a larger scope of work, you benefit in the following ways: \n\n- Reduce coordination efforts\n- Reduce cancellations\n- Easier and faster reviews\n- Get feedback faster\n\nIterating helps you ensure that your next step is in the right direction. \n\nAs we’ve practiced iteration at GitLab, I’ve found that when it’s not clearly understood, well-intentioned mistakes can happen. Because iteration is fundamental to everything that we do, it’s critical to regularly reinforce and refine what we define as an iteration. \n\nTo help clarify what is iteration, it helps to see examples of what iteration is not. Here are 12 shortcuts that I’ve seen be mistaken as iteration. \n\n## 1. 
Reducing quality\n\nSome people will take shortcuts, which leads to lower quality in the final product or deliverable. You can’t reduce quality to minimize the scope of an iteration. Your iteration needs to meet the same quality standards you would expect for any of your work. \n\nFor example, in the case of a user interface, every button needs to work and be properly styled and aligned. Nothing should feel out of place or unfinished. You can reduce the amount of functionality, but the functionality that you deliver needs to look and function as expected. \n\n## 2. Avoiding or reducing documentation\n\nWhen defining the scope of an iteration, make sure you include the right information so that the user can properly understand what happened and can derive value from your work. In the case of a new feature, without proper documentation, the recipient may not understand how to best use the feature, which defeats the purpose of delivering it quickly. \n\nIteration will make documentation faster given the reduced scope, so don’t avoid or delay the documentation. \n\n## 3. Compromising on security\n\nYou can’t compromise on security in the spirit of moving faster. An iteration must meet the same security standards and follow all the necessary security practices to ensure that your product and work doesn’t introduce any new vulnerabilities. \n\nAs an example, when building new features in GitLab, no matter how small an iteration is defined, we should always prioritize the protection of customers’ data. \n\n## 4. Delivering something that's not on the recommended path or on by default\n\nTo call an iteration complete, it needs to be on the recommended path or on by default. Otherwise, most users won’t see or benefit from the work.\n\nAt GitLab, in the past, we have made the mistake of considering an iteration complete before making a new feature the default or recommended path for all our users, which then results in fewer users for that functionality. 
To prevent having functionality in the product that users won’t find, we now require that the feature is on the recommended path and on by default before we call the iteration complete. \n\nFor big changes, such as when a feature may have a big impact on user experience or stability, we use feature flags when initially shipping a feature. This is a good strategy to start delivering something gradually into the product, but we don’t consider the iteration done until that feature flag has been removed and the feature is on by default. \n\n## 5. Shipping something of no value\n\nWhen considering the smallest scope possible for an iteration, the ultimate test needs to be whether it delivers something of value to the end user. Don’t confuse iteration with making progress on an initiative or checking off items on your to-do list. \n\nFor example, when building a new feature you may need to do a fair amount of set up. You may ship to production code that adds a configuration or capability that you’ll need to build the feature, but it’s completely transparent to the user. While that can be considered progress on your project timeline, it is not an iteration. The iteration completes when the user can start to derive some value from your efforts. \n\n## 6. An excuse to focus on unimportant items\n\nIteration will help you move faster and deliver more things, but you still have to prioritize and focus on what’s most important. When picking what to work on, you shouldn’t do first what’s smallest in scope. Instead, pick what will give you the highest value for the effort you’ll put in.\n\n## 7. Changing or lowering goalposts\n\nChanging a goal or lowering a goal is not iteration. Iteration is reducing the scope and keeping it small, but the reduced scope still needs to meet your goals. As you practice iteration, you may set goals for smaller time periods, which is a good practice I recommend. But changing the goal post is not a part of iteration. \n\n## 8. 
Revisions you don't ship or publish\n\nIt’s a common mistake to confuse revisions with iterations. To clearly understand the difference, see whether you’ve shipped or delivered something of value to the end user. If you haven’t, it is a revision, not an iteration. \n\nFor example, if when writing a blog post you get a draft reviewed and rewritten several times before publishing, those are considered revisions. Your first iteration is completed once you’ve published the first version of the blog post. \n\n## 9. An excuse to impose unrealistically tight timelines\n\nIf you set a timeline, it has to be realistic. I’ve seen instances in which people confuse iteration with just shrinking the timeline to something unrealistic. That is not iterating. Iteration is minimizing scope, but it requires a disciplined review of the scope to ensure that you’re allocating the right amount of time to complete it. \n\n## 10. An excuse to avoid planning\n\nSometimes teams confuse iteration with moving quickly on something without planning. This is not iterating. By reducing the scope, there will be less planning involved compared to the initial larger scope. But, no matter how small you make the scope of work, you need to plan, and that planning can be quite involved. You need to set an appropriate timeline to deliver the work and plan appropriately for reviews and dependencies. \n\nNot planning appropriately for an iteration negatively impacts efficiency, team morale, and can impact people’s lives outside of work. This needs to be avoided. \n\n## 11. Imposing long hours\n\nDon’t confuse iteration with imposing long working hours for a team. The goal of iterating is to scope work in a way that helps you deliver more tangible value within the same amount of time. Increasing the number of hours that the team works is not iteration. \n\n## 12. 
Expecting others to fix your work\n\nWhen you iterate, you need to take ownership and make sure that the end result is of value and meets all expectations of a finished product. You should not call an iteration complete if the work still requires fixing in order to be of value or meet quality expectations. \n\nAs GitLab grows as a company, reinforcing our Iteration value and staying clear about what is an iteration is fundamental to us [staying a startup](/company/still-a-startup/). I hope these 12 examples that are not iteration are helpful and empower everyone to help identify and correct situations when iteration is used incorrectly. Using iteration correctly will help us continue to move fast and deliver more value to our customers. It will also help keep the day-to-day momentum as we deliver valuable results. \n\nWatch this GitLab Unfiltered video where I discuss these 12 shortcuts that are not iteration.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n  \u003Ciframe src=\"https://www.youtube.com/embed/BW6TWwNZzIQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n",[9,683,1339],{"slug":2631,"featured":6,"template":686},"dont-confuse-these-twelve-shortcuts-with-iteration","content:en-us:blog:dont-confuse-these-twelve-shortcuts-with-iteration.yml","Dont Confuse These Twelve Shortcuts With Iteration","en-us/blog/dont-confuse-these-twelve-shortcuts-with-iteration.yml","en-us/blog/dont-confuse-these-twelve-shortcuts-with-iteration",{"_path":2637,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2638,"content":2644,"config":2649,"_id":2651,"_type":14,"title":2652,"_source":16,"_file":2653,"_stem":2654,"_extension":19},"/en-us/blog/ease-pressure-on-smb-developers-with-a-devops-platform",{"title":2639,"description":2640,"ogTitle":2639,"ogDescription":2640,"noIndex":6,"ogImage":2641,"ogUrl":2642,"ogSiteName":670,"ogType":671,"canonicalUrls":2642,"schema":2643},"Ease pressure on SMB 
developers with a DevOps platform","Small and medium-sized businesses have to be master multitaskers, but that's not always efficient. Here's how a DevOps platform can help.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668354/Blog/Hero%20Images/handshake.png","https://about.gitlab.com/blog/ease-pressure-on-smb-developers-with-a-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ease pressure on SMB developers with a DevOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-09-06\",\n      }",{"title":2639,"description":2640,"authors":2645,"heroImage":2641,"date":2646,"body":2647,"category":769,"tags":2648},[810],"2022-09-06","\nAdopting a full, end-to-end DevOps platform eases strain on IT, and that is particularly important in small and medium-sized businesses (SMBs). \n\nSince there’s generally only a handful of IT professionals – at most – working in an SMB, they’re often trying to keep their heads above water. They’re constantly in motion, moving between keeping often less than top-of-the-line systems running, acting as the user help desk, and ensuring company data is safe. They’re not only wearing multiple hats, they’re putting out one fire after another.\n\nWorking under that kind of constant pressure leaves little time and focus for developing and deploying new software, which most [every SMB needs](/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform/) to entice new customers, build the brand, and bring in revenue. Relieving that stress and enabling these tiny IT teams to succeed in creating great software products is about survival. 
And IT survival is about [adopting a DevOps platform](https://learn.gitlab.com/smbmigrationguide/migratedevopssmb).\n\nRelying on a full DevOps platform empowers IT professionals and enables them to eliminate wasted time and energy so they can focus on being a business driver. There are many parts of an end-to-end DevOps platform that lead to increased efficiency and decreased pressure on the IT team:\n\n- Automate processes – from testing to performance management and monitoring – to enable IT to be hands off with repetitive and often time-consuming tasks and eliminate the potential for human error that can use up a lot of time and money.\n\n- More quickly and efficiently turn a vision into software.\n\n- [Foster collaboration](/blog/5-ways-collaboration-boosts-productivity-and-your-career/) with people across departments to brainstorm design ideas and more efficiently make iterative deployments.\n\n- [Produce more stable and secure software](/blog/toolchain-security-with-gitlab/) that won’t need last-minute fixes or code re-writes.\n\n- Focus on delivering software instead of [managing toolchains](/blog/battling-toolchain-technical-debt/).\n\n- Stop switching back and forth between multiple tool interfaces, passwords, and ways of working.\n\n- Gain an overarching view of the entire development and deployment lifecycle.\n\n- Keep track of and easily access best practices to use in new projects by taking advantage of [continuous documentation](/blog/16-ways-to-get-the-most-out-of-software-documentation/) in the platform. \n\nLike anyone, IT professionals don’t perform best when they’re in a constant reactive state. Sure, many SMBs have started to use various DevOps tools to relieve the stress on IT, but if they haven’t adopted a single platform, then they’re simply creating more expense and more work for their already overburdened IT staff. 
That’s because by cobbling together a mishmash of disparate tools, they’re inadvertently creating an unwieldy toolchain that slows down deployment and the business it fuels. \n\nMoving to a full DevOps platform means shedding that costly and complex toolchain, speeding the transition of business vision into working software, and cutting the workload weighing down IT. \n\nAnd relieving that workload also is about keeping employees happy and less stressed. The [greatest resource a company has is its people](/blog/hiring-in-the-deep-end-of-the-talent-pool/). This is even more true for small companies where the pain of employee dissatisfaction and departure is felt even more acutely. Managers also don’t want projects waylaid because the people driving them are leaving. To stop that from happening, it’s critical to help people get their work done efficiently and more easily, which also reduces their stress and makes them happier.\n\nAn end-to-end platform isn’t just another tool. It’s a whole new way of working that can diminish the often chaotic environment that can surround IT. 
An SMB’s IT people will still wear many different hats but developing and deploying new software and iterations will be easier, more efficient, and less taxing.\n",[9,749,793],{"slug":2650,"featured":6,"template":686},"ease-pressure-on-smb-developers-with-a-devops-platform","content:en-us:blog:ease-pressure-on-smb-developers-with-a-devops-platform.yml","Ease Pressure On Smb Developers With A Devops Platform","en-us/blog/ease-pressure-on-smb-developers-with-a-devops-platform.yml","en-us/blog/ease-pressure-on-smb-developers-with-a-devops-platform",{"_path":2656,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2657,"content":2663,"config":2668,"_id":2670,"_type":14,"title":2671,"_source":16,"_file":2672,"_stem":2673,"_extension":19},"/en-us/blog/effective-ci-cd-pipelines",{"title":2658,"description":2659,"ogTitle":2658,"ogDescription":2659,"noIndex":6,"ogImage":2660,"ogUrl":2661,"ogSiteName":670,"ogType":671,"canonicalUrls":2661,"schema":2662},"Want a more effective CI/CD pipeline? Try our pro tips","Here’s how to take your CI/CD pipeline to the next level with hands on advice about faster builds, better security and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681447/Blog/Hero%20Images/cicdpipelines.jpg","https://about.gitlab.com/blog/effective-ci-cd-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Want a more effective CI/CD pipeline? Try our pro tips\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-07-29\",\n      }",{"title":2658,"description":2659,"authors":2664,"heroImage":2660,"date":2665,"body":2666,"category":791,"tags":2667},[851],"2020-07-29","\n\nNow that your [CI/CD pipeline](/topics/ci-cd/) is up and running, it’s time to fine-tune the performance. 
This hands on guide will walk you through tweaks that will improve a CI/CD pipeline’s speed, functionality, security, and integration with other tools and platforms.\n\n## Built for speed\n\nCI/CD and DevOps promises faster releases and we know it’s true: Even a basic automated pipeline is much speedier than the old days of manual handoffs. But there are ways to make the CI/CD pipeline even zippier. One straightforward option that guarantees faster builds is to [autoscale runners](/blog/making-builds-faster-autoscaling-runners/). If you have 15 minutes to spare, you can link your GitLab CI pipeline to the [Google Kubernetes engine](/blog/gitlab-ci-on-google-kubernetes-engine/). And it doesn’t get much faster than using the [Auto DevOps option](/blog/guide-to-ci-cd-pipelines/) if you’re setting up a new pipeline from scratch.\n\n## Do more with less\n\nOnce a pipeline is humming along, it’s time to think about tinkering with what you have. This is one of our favorite things to do at GitLab – we even used our CI/D pipeline to [turn our group conversation into a podcast](/blog/group-conversation-podcast/). We had an [unconventional CI/CD journey](/blog/gitlab-journey-to-cicd/), which goes a long way to explaining our overall enthusiasm for this technology.\n\nOur best advice when it comes to an effective CI/CD pipeline is to think outside the box. Need build images? It’s [easy to do](/blog/building-build-images/) with your CI/CD pipeline. You can also [create a cross-project pipeline](/blog/cross-project-pipeline/), or [build a bridge between Rust and Firebase](/blog/python-rust-and-gitlab-ci/).\n\n## Make it secure\n\nIt’s fun to play around with CI/CD functionality, but it’s critical to make sure your pipeline is secure. Start by making sure you [know the threat landscape](/blog/defend-cicd-security/). 
If you store key data in secrets management service [Vault](https://www.vaultproject.io), here’s how GitLab [makes the integration process easier and safer](/blog/vault-integration-process/).\n\nAnd for Jenkins users, it’s simple to [create deterministic security jobs](https://docs.gitlab.com/ee/integration/jenkins.html) from within GitLab.\n\n## Work with what you have\n\nNo effective CI/CD pipeline exists in a vacuum and to get the most out of yours it’s important to seamlessly integrate with other platforms and tools.\n\nAWS users can [set up multi-account SAM deployments](/blog/multi-account-aws-sam-deployments-with-gitlab-ci/) or [autoscale GitLab CI](/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate/) on Fargate.\n\nTeams working on Android projects can [can create a customized GitLab CI](/blog/setting-up-gitlab-ci-for-android-projects/) easily.\n\nAnd finally it’s possible to take advantage of Google’s Firebase, a backend-as-a-service tool, so you can enable [continuous deployment of database, serverless and apps](/blog/gitlab-ci-cd-with-firebase/).\n\n**Read more about CI/CD:**\n\n* [The four big benefits](/blog/positive-outcomes-ci-cd/) of CI/CD\n\n* [CI/CD challenges](/blog/modernize-your-ci-cd/) to consider\n\n* Everything you need to know about [Auto DevOps](/blog/auto-devops-explained/)\n\nCover image by [Jacek Dylag](https://unsplash.com/@dylu) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[109,9,231],{"slug":2669,"featured":6,"template":686},"effective-ci-cd-pipelines","content:en-us:blog:effective-ci-cd-pipelines.yml","Effective Ci Cd 
Pipelines","en-us/blog/effective-ci-cd-pipelines.yml","en-us/blog/effective-ci-cd-pipelines",{"_path":2675,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2676,"content":2682,"config":2687,"_id":2689,"_type":14,"title":2690,"_source":16,"_file":2691,"_stem":2692,"_extension":19},"/en-us/blog/efficient-devsecops-nine-tips-shift-left",{"title":2677,"description":2678,"ogTitle":2677,"ogDescription":2678,"noIndex":6,"ogImage":2679,"ogUrl":2680,"ogSiteName":670,"ogType":671,"canonicalUrls":2680,"schema":2681},"DevSecOps basics: 9 tips for shifting left","Here's how to create an efficient DevSecOps practice and shift your security left.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663602/Blog/Hero%20Images/efficient-devsecops-9-tips.jpg","https://about.gitlab.com/blog/efficient-devsecops-nine-tips-shift-left","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps basics: 9 tips for shifting left\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2020-06-23\",\n      }",{"title":2677,"description":2678,"authors":2683,"heroImage":2679,"date":2684,"body":2685,"category":679,"tags":2686},[1016],"2020-06-23","\n_This is the first in a five-part series on getting started with [DevSecOps](/topics/devsecops/). Part two outlines the steps needed to create [silo-free collaboration](/blog/achieve-devsecops-collaboration/). Part three looks at the importance of [automated security testing](/blog/devsecops-security-automation/). And part four explains how to [build a strong security culture](/blog/security-culture-devsecops/) to support your DevSecOps efforts._\n\nSpeed is required to stay competitive – nearly 83% of our [2020 Global DevSecOps Survey](/developer-survey/) respondents said they’re releasing code faster than ever with DevOps. 
With the pace of work accelerating, some important details are easily overlooked or underestimated – like security. \n\nThink back to the last several projects your team has launched. Did security testing begin late in your software development lifecycle (SDLC)? Was too much time wasted on friction between siloed development and security? Was the project delayed due to inefficient handoff between teams, lack of visibility across systems, or lack of planning and consideration?\n\nAll of these are symptoms of outdated security practices trying to fit into your DevOps or Agile methodologies. Upgrade your organization to DevSecOps by [shifting left](/topics/ci-cd/shift-left-devops/): Bring security to the front of your development pipeline. \n\n## Security is changing – with a long way to go\n\nSecurity respondents in our 2020 Global DevSecOps Survey report changes in their roles: Being increasingly included as part of a cross-functional team focused on security (27.73%), becoming more involved in the day-to-day/more hands on (26.94%), and focusing more on compliance (22.55%). Only 19.95% report that their role is not changing.\n\nIt’s evident that companies are beginning to shift their security practices, but security testing remains a source of frustration: Over 42% of survey respondents said that testing happens too late in the lifecycle. This may be due to conflicting opinions on who is responsible for security. Nearly 33% of respondents said the security team is responsible, while almost as many people (29%) said that everyone was responsible. 
\n\nHowever, it’s difficult for everyone to be responsible when developers aren’t provided with the proper tools and resources to assess the security of their code: Surprisingly, static [application security](/topics/devsecops/) testing (SAST) is still not a common developer tool: Less than 19% of companies surveyed in this year’s DevSecOps report put SAST scan results into a pipeline report that developers can access, and over 60% of developers don’t actually run SAST scans. \n\n## The importance of collaboration between security and development teams \n\nSecurity is a top priority in DevOps methodology because security breaches are troublesome and expensive, and the threats are persistent. Historically dev and sec [have not gotten along](/blog/developer-security-divide/), and when communication between groups is poor, it can be easier for security vulnerabilities to take hold. Also, dev and sec [rarely agree on who owns security](/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment/), which is a problem seen time and again in GitLab’s Annual DevSecOps Surveys.\n\nOur survey isn’t the only one finding strife between the teams. The [Ponemon Institute Research report](https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fwww.zeronorth.io%2Fresource%2Fponemon-report-revealing-the-cultural-divide-between-application-security-and-development%2F&data=04%7C01%7CHeather.Rubash%40netspi.com%7C5db8edd20731475c73e908d8868a4116%7C47bfc77a6733477ba2b2ecf6b199e835%7C0%7C0%7C637407275653430081%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=l7TP5PRZjCs1PqCV6JBrPVNMFQuLyBt%2BIOot6rUb5gw%3D&reserved=0) indicated that 71 percent of AppSec professionals believe security isn’t taken seriously by devs; specifically, they believe developers aren’t building in security at a sufficiently early stage. 
\n\nBut, when security and development teams collaborate early and often on security scanning for their code, there are a number of improvements, including:\n\n- Improved code quality \n- Fewer time-consuming and costly fixes\n- Full visibility of security measures for the whole organization\n- Minimized risk of security breaches\n- Top-notch security testing\n\nSecurity tools and automation can only take teams so far. There is no “set it and forget it” option when it comes to security. There needs to still be a human at the wheel. Collaboration between development and security teams needs to be as much of a priority as security itself. DevSecOps needs to be a culture, not just a practice.\n\nTo remove team siloes around security, here are a few considerations:\n\n1. **Understand what is driving each respective team.** The motivations behind the choices significantly affect how DevSecOps efforts turn out. \n2. **Do the big things together.** Auditing existing security tools, processes, and places where automation is or isn’t in place should be a group effort, not an individual team effort.\n3. **Define security objectives and responsibilities at every stage of the SDLC.** Early scanning and testing are vital to security, and everyone can be part of checking that these security checks are happening. \n4. **Plan and execute a comprehensive security training plan.** Have clear guidelines on security goals and steps to follow in case of an active threat. \n5. **Consider creating a [security champions program](/blog/why-security-champions/).**\n\n### Key to efficient security: Clarity\n\nCommunication cannot be understated when it comes to shifting left. Moving security forward in the software lifecycle won’t help anyone if your team doesn’t understand their responsibilities and expectations. Document any and all role changes when shifting your security practices, and then make sure that all parties have the tools necessary to get the job done. 
\n\n## What is shifting left?\n\nShift left is a DevOps testing concept to speed up development while at the same time improving code quality. Think of the code development process as starting on the “left” with development and ending on the “right” with deployment, so shifting the testing stage left means moving it from the end of the process to close to the beginning. Shifting left is made far easier with test automation.\n\n### 3 Important reasons to shift left\n\n1. **More code gets tested.** By bringing security forward in the SDLC, you provide more opportunities for code to be scanned and vulnerabilities to be remediated. By automating static application security testing (SAST) at every code commit, for example, you can at least ensure that all code has been scanned once.\n1. **Planning becomes more well-rounded.** Shifting left is not just about technology – it’s also about people. Bring a security DRI into your initial planning meeting to make sure you account for security needs in all stages of the SDLC. This will help streamline end-of-cycle security reviews, reduce friction between teams, and increase the likelihood of hitting your deadline with a secure product.\n1. **Better accountability among non-security team members.** Shifting left lets your team know that everyone is now expected to take security seriously and make it a part of their daily work. \n\n## 9 Tips for efficient DevSecOps\n\n1. Measure time lost in dealing with vulnerabilities after code is merged. Next, look for a pattern in the type or source of those security vulnerabilities, and make adjustments for improvement.\n2. Identify pain points and software risks between development and security, create a plan to resolve them, and then execute on that plan. \n3. Make small code changes. Smaller updates are easier to review and secure and can be launched more quickly than monolithic project changes.\n4. Automate and integrate security scans. 
Make scans ubiquitous so that every secure code change is reviewed and security flaws are found at their source of creation.\n5. Build security scans into the developer's workflow. Integrated security enables developers to find and fix vulnerabilities before the code ever leaves their hands. This also reduces the volume of open-source vulnerabilities sent to the security team, streamlining their review.\n6. Give developers access to SAST and DAST reports. While this is important for remediation, it's also a valuable tool to help developers build secure coding practices.\n7. Reduce or eliminate any waterfall-style security processes within your SDLC. You should always be able to change direction as needs arise: Keep your organization and your security controls nimble.\n8. Give the security team visibility into both resolved and unresolved vulnerabilities in code, where the vulnerabilities reside, who created them, and their status for remediation.\n9. Streamline your toolchain so that employees can focus their attention on a single interface: A single source of truth.\n\n_How efficient are your DevSecOps practices? 
[Take our DevSecOps Maturity Assessment to find out.](https://about.gitlab.com/resources/devsecops-methodology-assessment/)_\n\n**Learn more about DevSecOps:**\n\n[How to harden your GitLab instance](/blog/gitlab-instance-security-best-practices/)\n\n[Make your toolchain more secure](/blog/toolchain-security-with-gitlab/)\n\n[Our goals with Zero Trust](/blog/zero-trust-at-gitlab-problems-goals-challenges/)\n\nCover image by [Marc Sendra Martorell](https://unsplash.com/@marcsm) on [Unsplash](https://unsplash.com/photos/-Vqn2WrfxTQ)\n{: .note}\n",[855,749,9,875],{"slug":2688,"featured":6,"template":686},"efficient-devsecops-nine-tips-shift-left","content:en-us:blog:efficient-devsecops-nine-tips-shift-left.yml","Efficient Devsecops Nine Tips Shift Left","en-us/blog/efficient-devsecops-nine-tips-shift-left.yml","en-us/blog/efficient-devsecops-nine-tips-shift-left",{"_path":2694,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2695,"content":2701,"config":2706,"_id":2708,"_type":14,"title":2709,"_source":16,"_file":2710,"_stem":2711,"_extension":19},"/en-us/blog/efficient-pipelines",{"title":2696,"description":2697,"ogTitle":2696,"ogDescription":2697,"noIndex":6,"ogImage":2698,"ogUrl":2699,"ogSiteName":670,"ogType":671,"canonicalUrls":2699,"schema":2700},"Extract greater efficiency from your CI pipelines","Learn some techniques to find the balance between pipeline performance and resource utilization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667534/Blog/Hero%20Images/ci-pipeline.jpg","https://about.gitlab.com/blog/efficient-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Extract greater efficiency from your CI pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vlad Budica\"}],\n        \"datePublished\": \"2022-03-09\",\n      
}",{"title":2696,"description":2697,"authors":2702,"heroImage":2698,"date":2279,"body":2704,"category":791,"tags":2705},[2703],"Vlad Budica","\nWhen discussing efficiency, typically we need to balance two things: time and money. It's quite easy to optimize for just one of these parameters. However, that can be an oversimplification. Within some constraints, more resources (i.e., hardware and Runners) equal better performance. Yet, the exact opposite is true for other constraints. In this article, I will walk you through the process of finding the sweet spot in optimizing your GitLab CI pipeline. The principles that I'll cover work well for existing pipelines and also for new ones. Please note that this is subjective and the sweet spot might be very different for different users in different scenarios.\n\nAs we dig into the technical aspects, note that we are looking for an overall optimization of a pipeline, as opposed to just looking at a particular job. The reasoning behind it is that local optimizations might make the overall pipeline less efficient (we might generate bottlenecks).\n\nThe optimization recommendations below fall into two categories:\n- Execute fewer jobs and pipelines\n- Shorten the execution time of jobs and pipelines\n\nThe first step before modifying an aspect of a system is to understand it. Observe it in full. You need to know the overall pipeline architecture and also the current metrics for it. You need to know the total execution time, jobs that take a large amount of time to finish (any bottlenecks), and the total job workload (potential queue time) and Runner capacity – these last two go hand in hand. Finally, we can use [Directed Acyclic Graphs](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/), or DAGs, to visualize the pipeline and see the critical path (the minimum and maximum pipeline duration). 
We want to do this because we want to minimize as much as possible the detrimental impact doing changes can have on pipeline performance.\n\n## Execute fewer jobs and pipelines\n\nLet's look at ways of reducing the number of jobs and pipelines that get executed.\n\n### Apply rules\n\nThe first thing would be to decide what needs to be executed and when. For example, with a website, if the only change that was performed is to the text on the page, then the resulting pipeline doesn't need to contain all the tests and checks that are performed when changing the web app.\n\nThis requires the use of the [rules keyword](https://docs.gitlab.com/ee/ci/yaml/#rules). Rules are evaluated when a pipeline is created (at each trigger), and evaluated in order until the first match. When a match is found, the job is either included or excluded from the pipeline, depending on the configuration.\n\nThrough the rules keyword you can decide very precisely when a job should run or not. More information about use cases and configuration parameters can be found in the [doc page for rules](https://docs.gitlab.com/ee/ci/yaml/#rules).\n\n### Make jobs interruptible\n\nNow that jobs are only running when needed, you can focus on what happens when a new pipeline is triggered while a job is still running. This can lead to inefficiencies because we already know the job isn't running on the latest change performed on the target branch and that the results will get scrapped.\n\nThis is where the [interruptible keyword](https://docs.gitlab.com/ee/ci/yaml/#interruptible) comes in. It enables us to specify that a job can be interrupted when a newer one is triggered on the same branch. 
This should be coupled with the [automatic cancellation of redundant pipelines feature](https://docs.gitlab.com/ee/ci/pipelines/settings.html#auto-cancel-redundant-pipelines) so, in the end, jobs will be automatically canceled when newer pipelines are triggered.\n\nOne word of caution, use this mechanism only with jobs that are safe to stop such as a build or a test job. Don't use this with your deployment jobs as you're eventually going to end up with partial deployments. \n\nOne last point around executing fewer jobs and pipelines is to try to reschedule non-essential pipelines to as least frequent as possible. It's a balance that needs to be found between running the pipelines too often and not running them enough. Just go with the minimum acceptable by your company policy.\n\n## Shorten the execution time of jobs and pipelines\n\nThe next thing would be to find ways of making our jobs and pipelines execute in less time.\n\n### Execute jobs in parallel\n\nYou can [create DAGs in your pipelines](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/) to create relationships between jobs and ensure that jobs are executed as soon as all the requirements are met if there are any and that they aren't waiting unnecessarily for other jobs to finish. By using the [needs keyword](https://docs.gitlab.com/ee/ci/yaml/#needs) together with the [parallel keyword](https://docs.gitlab.com/ee/ci/yaml/#parallel), you can implement DAGs.\n\nAnother useful mechanism to drive parallelism is [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html), which enable you to trigger concurrently running pipelines.\n\nThese offer great flexibility and by using them you can execute your workloads in parallel as efficiently as possible. 
This can be a double-edged sword though as DAGs and [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) will increase the complexity of your pipelines, making them harder to analyze and understand. Within this very complex environment, you can run into unwanted side effects such as increased cost or even reduced efficiency.\n\nThe more jobs and pipelines you run in parallel, the more load will be put on your Runner infrastructure. If you do have an autoscaling mechanism and a large enough pool of resources, this will ensure no big queues are created and that things are running smoothly, but also lead to increased infrastructure costs. On the other hand, if you don't have autoscaling or if you have lower limits for the amount of resources available, the costs will be kept in check but your overall execution time will suffer because jobs will wait longer in queues.\n\n### Fail fast\n\nIt's desirable to detect errors and critical failures as soon as possible in your jobs and pipelines, and stop the execution. If you wait until toward the end of the pipeline to fail, the whole pipeline will waste hardware resources and increase your execution and waiting times. This is easier to implement when first designing a pipeline but can be achieved as well through refactoring of your existing ones.\n\nTesting usually takes a lot of time so this means that we're waiting for the execution to finish before canceling the whole pipeline if the tests fail. What we want to do is move the jobs that run quicker earlier in the pipeline thus getting feedback sooner. To configure this behavior, use the [allow_failure keyword](https://docs.gitlab.com/ee/ci/yaml/#allow_failure) and only for jobs that when fail should fail the whole pipeline.\n\n### Caching\n\nYou can also optimize the caching of your dependencies, which will improve the execution time. 
This can be very useful for jobs that fail often but for which the dependencies don't change that often.\n\nTo configure this in your jobs, you should use the [cache:when keyword](https://docs.gitlab.com/ee/ci/yaml/#cachewhen).\n\n### Optimize your container images\n\nUsing big images in your pipelines can slow things down significantly, as they take longer to be pulled. So the solution would be to use smaller images. Simple, right?\n\nWell, it's not always that easy to do, so you should start by analyzing your base image and your network speed as these two will give an indication of how long it will take for the image to be pulled. The network connection we're interested in is the one between your Runner and your container registry.\n\nOnce we have this kind of information, we can decide to host the image in another container registry. If you have GitLab hosted in a public cloud you should use the container image registry provided by that provider. An alternative that works no matter where GitLab is hosted is to use the internal GitLab container registry that's included with your service.\n\nYou will get better results if instead of using a master container image that holds everything that you need to run the whole pipeline, you use multiple smaller ones that are tailored for each job. It's faster if you use custom container images and have all the tools you need pre-installed. This would also be a safer option because you can validate more thoroughly the contents of the image.\n\nMore information about this topic can be found in [Docker's \"Best practices for writing Dockerfiles\"](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/).\n\n## Pipeline optimization is part science, part art\n\nYou should approach your pipeline optimization efforts through a continuous improvement lens. 
This process is part science, part art as there aren't any quick solutions that you can apply and get your ideal result.\n\nI encourage you to test, document, and analyze the results when it comes to pipeline optimization efforts. You try one thing, look for feedback from the metrics of your pipelines, document the results, the changes, and the new architecture (this can happen in GitLab issues and merge requests) so you can extract some learnings, and the cycle starts again.\n\nSmall gains will add up and provide significant improvements at a higher scale. As I mentioned before, look for overall improvements instead of local ones. Now applying these principles to each project (pipeline templates makes it easier to adopt at scale), we can look at how these improvements across projects add up.\n\nRead more: Learn how to [troubleshoot a GitLab pipeline failure](/blog/how-to-troubleshoot-a-gitlab-pipeline-failure/).\n",[9,976,977],{"slug":2707,"featured":6,"template":686},"efficient-pipelines","content:en-us:blog:efficient-pipelines.yml","Efficient Pipelines","en-us/blog/efficient-pipelines.yml","en-us/blog/efficient-pipelines",{"_path":2713,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2714,"content":2720,"config":2725,"_id":2727,"_type":14,"title":2728,"_source":16,"_file":2729,"_stem":2730,"_extension":19},"/en-us/blog/eight-steps-to-prepare-your-team-for-a-devops-platform-migration",{"title":2715,"description":2716,"ogTitle":2715,"ogDescription":2716,"noIndex":6,"ogImage":2717,"ogUrl":2718,"ogSiteName":670,"ogType":671,"canonicalUrls":2718,"schema":2719},"8 Steps to prepare your team for a DevOps platform migration","Getting teams ready enables them to migrate with more confidence and ease. 
Here's how to get started.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663786/Blog/Hero%20Images/craftsman-looks-at-continuous-integration.jpg","https://about.gitlab.com/blog/eight-steps-to-prepare-your-team-for-a-devops-platform-migration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"8 Steps to prepare your team for a DevOps platform migration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-08-16\",\n      }",{"title":2715,"description":2716,"authors":2721,"heroImage":2717,"date":2722,"body":2723,"category":769,"tags":2724},[810],"2022-08-16","\nWhen organizations are getting ready to [move to a DevOps platform](https://page.gitlab.com/migrate-to-devops-guide.html), taking the time to get IT teams prepped for the migration will mean people can make the transition with more confidence and efficiency.\n\nBy [replacing a complicated mix of DevOps tools](/topics/devops/use-devops-platform-to-avoid-devops-tax/) with a single, end-to-end DevOps platform, you are about to change the way people work in a fundamental way. That will bring many benefits, like cutting tool-management costs, [increasing security](/blog/one-devops-platform-can-help-you-achieve-devsecops/), speeding software creation and deployment, and [replacing silos with a collaborative environment](/blog/5-ways-collaboration-boosts-productivity-and-your-career/). But any kind of change can create anxiety. By reaching out to people as part of your migration prep, managers can calm those stresses, create champions for the adoption, and ease the work that’s to come. \n\nLet’s look at what IT leaders can do to ease this transition for everyone.\n\n## Build buy-in\n\nStarting at the VP and CIO level, create organization-wide buy-in for this migration. 
This will be a wide-reaching project so everyone from the C-suite on down needs to be on board. Help them understand the importance of making this move. It’s not about adding a new tool – it’s about improving the way software development works overall, so make sure everyone is invested _from the beginning_. “Management and DevOps teams both need to understand that not migrating will ultimately take up more time and energy because they’d be forced to continue time-consuming glue work and duct taping to keep the toolchain stitched together,\" says [Brendan O’Leary](/company/team/#brendan), staff developer evangelist at GitLab. “People will be doing a lot less of that after a migration.”\n\n> Join us at [GitLab Commit 2022](/events/commit/) and connect with the ideas, technologies, and people that are driving DevOps and digital transformation.\n\n## Find champions\n\nEarly in the process, find your innovators and migration champions. Talk with people on every team to figure out who is excited about adopting a DevOps platform. These people will be critical. Empower them to lead the charge by allowing them to be the first to migrate with your full, visible support. Then their migration successes will serve as inspiration for those less excited to make the move.\n\n## Ease tension\n\nRemember that change makes people nervous and be sensitive to that. Get ahead of any anxieties by laying out how continuing on with their existing (and ever-expanding) [toolchains will only suck up more of their time and efforts](/blog/the-journey-to-a-devops-platform/) because they’ll have to remain focused on juggling a tangle of tools, instead of actually turning plans into software. Toolchains are not the fun part of their jobs, and they’ll be letting go of that.\n\n## Set expectations\n\nTalk with workers about what this will mean for them individually. Reassure them that this does not mean their jobs will be eliminated. 
However, it will change their day-to-day responsibilities since they’ll be doing less feeding and watering of disparate tools. That will give them more time to take on bigger, more valuable and more interesting projects. Developers, in particular, want to [work on projects that matter](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/). Decreasing the toolchain red tape will be a huge step towards increased job satisfaction. \n\n## Define roles\n\nNot everyone on every team will work on the migration. Some will need to keep software development and deployment moving along, while others work on the adoption. Make it clear to individual team members what their roles will be. They’ll automatically be more at ease if it’s clear what their migration responsibilities will be.\n\n## Plan for training\n\nAssure everyone there will be training. They won’t just be thrown into the deep end of the pool. Make sure they know you will be setting them up for success.\n\n## Create sample projects\n\n[Fatima Sarah Khalid](/company/team/#sugaroverflow), a developer evangelist at GitLab, says that before a migration even begins, managers should ensure their team members are ready to use a DevOps platform to do everything from planning to testing, and pushing software iterations through to production. “Managers should think about having a sample project set up with issues and epics. Set up workflows and merge requests. Run it all through,” says Khalid. 
“Getting hands-on experience before the migration will get rid of anyone’s fear that they’ll break something.”\n\n## Lay out the benefits\n\nMake sure everyone understands the benefits of using a DevOps platform:\n\n- Your business will be able to quickly, securely, and efficiently turn a vision into software.\n\n- Working in isolated silos will be replaced with working in tandem with teammates, [collaborating, and sharing information and responsibilities](/blog/if-its-time-to-learn-devops-heres-where-to-begin/).\n\n- A single application will give an overarching view of projects, enabling teams to check in on, comment on and offer suggestions on projects as they move through the development lifecycle.\n\n- Security and compliance will increase as it will be built into every step of the development and deployment lifecycle.\n\n- [Built-in automation](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) will reduce repetitive hands-on work with everything from testing to documentation.\n\nBy preparing teams to make the move to a DevOps platform, the entire migration process will be easier and more efficient. 
For more information on transitioning to an end-to-end platform, [check out this ebook](https://page.gitlab.com/migrate-to-devops-guide.html).\n",[9,109,749],{"slug":2726,"featured":6,"template":686},"eight-steps-to-prepare-your-team-for-a-devops-platform-migration","content:en-us:blog:eight-steps-to-prepare-your-team-for-a-devops-platform-migration.yml","Eight Steps To Prepare Your Team For A Devops Platform Migration","en-us/blog/eight-steps-to-prepare-your-team-for-a-devops-platform-migration.yml","en-us/blog/eight-steps-to-prepare-your-team-for-a-devops-platform-migration",{"_path":2732,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2733,"content":2739,"config":2745,"_id":2747,"_type":14,"title":2748,"_source":16,"_file":2749,"_stem":2750,"_extension":19},"/en-us/blog/eks-fargate-runner",{"title":2734,"description":2735,"ogTitle":2734,"ogDescription":2735,"noIndex":6,"ogImage":2736,"ogUrl":2737,"ogSiteName":670,"ogType":671,"canonicalUrls":2737,"schema":2738},"Setting up GitLab EKS Fargate Runners in just one hour","This detailed tutorial answers the question of how to leverage Amazon's AWS Fargate container technology for GitLab Runners.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663373/Blog/Hero%20Images/jeremy-lapak-CVvFVQ_-oUg-700unsplash.jpg","https://about.gitlab.com/blog/eks-fargate-runner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get started with GitLab EKS Fargate Runners in 1 hour and zero code, Iteration 1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-05-24\",\n      }",{"title":2740,"description":2735,"authors":2741,"heroImage":2736,"date":2742,"body":2743,"category":791,"tags":2744},"Get started with GitLab EKS Fargate Runners in 1 hour and zero code, Iteration 1",[1239],"2023-05-24","\nLeveraging Amazon's AWS Fargate container technology for [GitLab 
Runners](https://docs.gitlab.com/runner/) has been a longstanding ask from our customers. This tutorial gets you up and running with the GitLab EKS Fargate Runner combo in less than an hour.\n\nGitLab has a pattern for this task for [Fargate](https://docs.aws.amazon.com/AmazonECS/latest/userguide/what-is-fargate.html) runners under AWS Elastic Container Service (ECS). The primary challenge with this solution is that AWS ECS itself does not allow for the overriding of what image is used when calling an ECS task. Therefore, each GitLab Runner manager ignores the gitlab-ci.yml `image:` tag and runs on the image preconfigured in the task during deployment of the runner manager. As a result, you'll end up creating runner container images that contain every dependency for all the software built by the runner, or you'll create a lot of runner managers per image — or both.\n\nI have long wondered if Fargate-backed Elastic Kubernetes Service (EKS) could get around this limitation since, by nature, Kubernetes must be able to run any image given to it.\n\n## The approach\n\nNothing takes the joy out of learning faster than a lot of complex setup before being able to get to the point of the exercise. To address this, this tutorial uses four things to dramatically reduce the time and steps required to get from zero to hero.\n\n1. AWS CloudShell to minimize the EKS Admin Tooling setup. This also leaves your local machine environment untouched so that other tooling configurations don't get modified.\n2. A project called **AWS CloudShell ”Run From Web” Configuration Scripts** to rapidly add additional tooling to CloudShell. This includes some hacks to get large Terraform templates to work on AWS CloudShell.\n3. 
EKS Blueprints — specifically, a Terraform example that implements both the [Karpenter autoscaler](https://aws.amazon.com/blogs/aws/introducing-karpenter-an-open-source-high-performance-kubernetes-cluster-autoscaler/) and Fargate, including for the kube-system namespace.\n4. A simple Helm install for GitLab Runner.\n\nAlthough you will be running CLI commands and editing config files, no coding is required in the sense that you won't have to build something complex from scratch and then maintain it yourself.\n\n## The results\n\nIt works! It can run 2 x 200 (max allowed per job) parallel “Hello, World” jobs on AWS Fargate-backed EKS in about 4 minutes, which demonstrates the unlimited scalability. It can also run a simple Auto DevOps pipeline, which proves out the ability to run a bunch of different containers.\n\nThe fact that the entire cluster - including kube-system - is Fargate backed reduces the Kubernetes specific long term SRE work to a much lower value approaching that of ECS Fargate clusters. Later on we discuss that this trade-off has a cost and how it can be reconfigured.\n\n## What makes it possible: Product-managed IaC that is an extensible framework\n\nToolkitting made up of Infrastructure as Code (IaC) is frequently referred to as “templates,” and these templates have a reputation of not aging well because there is no active stewardship of the codebase — they are thought of as a one-and-done effort. However, this term does not reflect reality well when the underlying IaC code is actually being product-managed. You can tell if something is being product-managed by using these markers:\n\n- It has a scope-bounded vision of what it wants to do for the community being served (customer).\n- It has active stewardship that keeps the codebase moving along, even if it is open source.\n- It seeks to incorporate strategic enhancements, a.k.a. 
new features.\n- Things that are broken are considered bugs and are actively eliminated.\n- There is a cadence of taking underlying version updates and for supporting new versions of the primary things they deploy.\n\nAs an extensible framework, EKS Blueprints:\n\n- Are purposefully architected to be extended by anyone.\n- Already have many extensions built.\n\nWhen implementing using EKS Blueprints and you come upon a new need, it is important to check if EKS Blueprints already handles that consideration - similarly to how you would look for Ruby Gems, NPM Modules or Python PyPI packages before building functionality from scratch.\n\nAll of the above are aspects of how the AWS EKS team is product-managing EKS Blueprints. They deserve a big round of applause because product-managing anything to prevent it from becoming yet another community-maintained shelfware project is a strong commitment that requires tenacity!\n\n## Reproducing the experiment\n\n### 1. Set up AWS CloudShell\n\n> **Note:** If you already have a fully persistent environment setup (like your laptop) with: AWS CLI, kubectl, Terraform, then you can avoid environment rebuilds when AWS CloudShell times out by using that instead.\n\nAWS CloudShell comes with kubectl, Git, and AWS CLI, which are all needed. However, we also need a few other scripts. More information about these scripts can be read in [my blog post on AWS CloudShell “Run From Web” Configuration Scripts](https://missionimpossiblecode.io/aws-cloudshell-run-from-web-configuration-scripts).\n\n> **Note:** The steps in this section up through the `git clone` from GitLab step (second clone operation) in the next section can be accomplished by running this: `s=prep-eksblueprint-karpenter.sh ; curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/${s} -o /tmp/${s}; chmod +x /tmp/${s}; bash /tmp/${s}*` .\n\n1. Use the web console to login to an AWS account where you have admin permissions.\n2. 
Switch to the region of your choosing.\n3. In the bottom left of the console click the “CloudShell” icon.\n4. Copy and paste the following one-liner into the console to install Helm, Terraform, and the Nano text editor:\n   `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/add-all.sh -o $HOME/add-all.sh; chmod +x $HOME/add-all.sh; bash $HOME/add-all.sh`\n5. Since our Terraform template will grow larger than the 1GB limit of space in the $HOME directory, we need a workaround to use the template in one directory, but store the Terraform state in $HOME where it will be kept as long as 120 days. The following one-liner triggers a script that performs that setup for us, after which we can use the /terraform directory for our template:\n   `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/prep-for-terraform.sh -o $HOME/prep-for-terraform.sh; chmod +x $HOME/prep-for-terraform.sh; bash $HOME/prep-for-terraform.sh`\n\n### 2. Run Terraform EKS Blueprint\n\n> **Note:** If at any time you leave your AWS CloudShell long enough for your session to end, the /terraform directory will be tossed. Simply run the last script above and the first four steps below to make it operable again. This will most likely be necessary when it comes time to teardown the Terraform created AWS resources.\n>\n> Sometimes your AWS CloudShell credentials may expire with a message like: `Error: Kubernetes cluster unreachable: Get \">CLUSTER URL>\": getting credentials: exec: executable aws failed with exit code 255`. Simply refresh the entire browser tab where AWS CloudShell is running and you’ll generally have new credentials.\n\n#### Version safety\n\nThis tutorial uses a specific release of the EKS Blueprint project so that you have the known state at the time of publishing. The project version also cascades into the versions of all the many dependent modules. 
While it may also work with the latest version, the version at the time of writing was Version 4.29.0.\n\nThis tutorial also uses Terraform binary Version 1.4.5.\n\n#### Procedures\n\nIf, while using AWS CloudShell, you experience this error: `Error: configuring Terraform AWS Provider: no valid credential sources for Terraform AWS Provider found`, you will need to refresh your browser to update the cached credentials in the terminal session.\n\nPerform the following commands on the AWS CloudShell session:\n\n1. `git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git --no-checkout /terraform/terraform-aws-eks-blueprints` \n2. `cd /terraform/terraform-aws-eks-blueprints/`\n3. `git reset --hard tags/v4.29.0` #Version pegging to the code that this article was authored with.\n4. `git clone https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate.git /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n\n   **Note:** Like other EKS Blueprints examples, the GitLab EKS Fargate Runner example references EKS Blueprint modules with a relative directory reference. This is why we are cloning it into a subdirectory of the EKS Blueprints project.\n5. `cd /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n6. `terraform init`\n\n   **Important**: If you are using AWS CloudShell and your session times out, the /terraform folder and the installed utilities will be gone. You would have to reproduce the above steps to get the Terraform template in a usable state again. This is most likely to happen when you go to use Terraform to delete the stack after playing with it for some days.\n\n   The next few instructions are from: **https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/karpenter/README.md#user-content-deploy**. Note the `-state` switch ensures our state is in persistent storage.\n7. `terraform apply -target module.vpc -state=$HOME/tfstate/runner.tfstate`\n8. 
`terraform apply -target module.eks -state=$HOME/tfstate/runner.tfstate`\n9. **Note:** If you receive “Error: The configmap ”aws-auth” does not exist”, re-run the same command - it will usually update successfully.\n10. `terraform apply -state=$HOME/tfstate/runner.tfstate`\n\nThe previous command will output a kubeconfig command that needs to be run to ensure subsequent kubectl commands work. Run that command. If you are in AWS CloudShell and did not copy the command, this command should work and map to the correct region:\n    `aws eks update-kubeconfig --region $AWS_DEFAULT_REGION --name \"glrunner\"`\n\nIf everything was done correctly, you will have an EKS cluster named `karpenter` in the CloudShell region web console like this:\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/eksclusterinconsole.png)  \n\nAnd the output of this console command `kubectl get pods -A` will look like this:\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/cliplaincluster.png)\n\nThe output of this console command `kubectl get nodes -A` will show the Fargate prefix:\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/clinodesarefargate.png)\n\n> **Note:** Notice that all the EKS extras (coredns, ebs-cni, and karpenter itself) are also running on Fargate. If you are willing to tolerate some regular Kubernetes nodes, you may be able to save cost by running always-on pods on regular Kubernetes hosts. Since this cluster runs Karpenter, you will not need to manually scale those hosts and EKS makes control plane and node updates easier.\n\n### 3. Install GitLab Runner\n\nThese and other commands are available in the GitLab documentation for [GitLab Runner Helm Chart](https://docs.gitlab.com/runner/install/kubernetes.html#additional-configuration).\n\n1. Create an empty GitLab project.\n2. Retrieve a GitLab Runner Token from the project. 
Keep in mind that using a project token is the easiest way to ensure your experiment runs only on the EKS Fargate Runner. Using a group token may cause your job to run on other runners already setup at your company. You can follow [“Obtain a token”](https://docs.gitlab.com/runner/register/#requirements) from the documentation if you need to.\n3. Perform the following commands back in the AWS CloudShell session.\n4. `nano runnerregistration.yaml`\n5. Paste the following:\n\n   ```yaml\n   gitlabUrl: https://_YOUR_GITLAB_URL_HERE_.com\n   runnerRegistrationToken: _YOUR_GITLAB_RUNNER_TOKEN_HERE_\n   concurrent: 200\n   rbac:\n     create: true\n   runners:\n     tags: eks-fargate\n     runUntagged: true\n     imagePullPolicy: if-not-present\n   envVars:\n     - name: KUBERNETES_POLL_TIMEOUT\n       value: 90  \n   ```\n\n   **Note:** Many more settings are discussed in the documentation for the [Kubernetes Executor](https://docs.gitlab.com/runner/executors/kubernetes.html). \n\n**Hard Lesson:** Using a setting for `concurrent` that is lower than our `parallel` setting in the GitLab job below results in all kinds of failures due to some job pods having to wait for an execution slot. Since it’s Fargate, there is no savings to keeping it lower and no negative impact to making it the complete parallel amount.\n\n6. Replace \\_YOUR_GITLAB_URL_HERE_ with your actual GitLab URL.\n7. Replace \\_YOUR_GITLAB_RUNNER_TOKEN_HERE_ with your actual runner token.\n8. Press CTRL-X to exit and press Y to the save prompt.\n9. `helm repo add gitlab https://charts.gitlab.io`\n10. `helm repo update gitlab`\n11. `helm install --namespace gitlab-runner --create-namespace runner1 -f runnerregistration.yaml gitlab/gitlab-runner`\n12. 
Wait for a few minutes and check the project’s list of runners for a new one with the tag `eks-fargate`\n\nIn AWS CloudShell the command `kubectl get pods -n gitlab-runner` should produce output similar to this:\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/runnerlist.png)\n\nAnd in the GitLab Runner list, it will look similar to this:\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/glrunnerlist.png)\n\n### 4. Run a test job\n\nThe simplest way to test GitLab Runner scaling is using the `parallel:` keyword to schedule multiple copies of a job. It can also be used to create a job matrix where not all jobs do the same thing.\n\nOne or more GitLab Runner Helm deployments can live in any namespace, so you have many to many mapping flexibility for how you think of runners and their Kubernetes context.\n\nIn the GitLab project where you created the runner, use the web IDE to create .gitlab-ci.yml and populate it with the following content:\n   ```yaml\n   parallel-fargate-hello-world:\n     image: public.ecr.aws/docker/library/bash\n     stage: build\n     parallel: 200\n     script:\n       - echo \"Hello Fargate World\"\n   ```\n\n**Hard Lesson:** After hitting the Docker hub image pull rate limit, I shifted to the same container in the AWS Public Elastic Container Registry (ECR), which has an [image pull rate limit](https://docs.aws.amazon.com/AmazonECR/latest/public/public-service-quotas.html) of 10 per second for this scenario.\n\nIf the job does not automatically start, use the pipeline page to force it to run.\n\nIf everything is configured correctly, your final pipeline status panel should look something like this:\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/eks-fargate-runner/completedjobs.png)\n\n### 5. 
Runner scaling experimentation\n\nThese and other commands are available in the GitLab documentation for [GitLab Runner Helm Chart](https://docs.gitlab.com/runner/install/kubernetes.html#additional-configuration).\n\nAdditional runners can be added by re-running the install command with a different name for the runner (if using the same token you’ll have two runners in the same group or project):\n\n`helm install --namespace gitlab-runner runner2 -f runnerregistration.yaml gitlab/gitlab-runner`\n\n200 jobs take just under 2 minutes.\n\n#### 400 parallel jobs\n\nBy setting up a second identical job (with a unique job name), I was able to process 400 total jobs.\n\n**Hard Lesson:** The runner likes to schedule all jobs in a parallel job on the same runner instance. It does not seem to want to split a large job across multiple runners registered in the same project. So in order to get more than 200 jobs to process, I had to have two registered runners set to `concurrent:200` and two separate jobs set to `parallel: 200`\n\n400 jobs take just over 3 minutes.\n\n#### More than 400 parallel jobs\n\nAs I tried to scale higher, jobs started to hang. I tried specifically routing jobs to five runners each capable of 300 parallel jobs. I also tried multiple stages and used a hack of `needs []` to get simultaneous execution of jobs in multiple stages.\n\nI was not successful and there could be a wide variety of reasons why — a riddle for a future iteration.\n\nThis command can be used to update a runner's settings after editing the Helm values file (including the token to move the runner to another context): \n\n`helm upgrade --namespace gitlab-runner -f runnerregistration.yaml runner2 gitlab/gitlab-runner`\n\nI found that when I pushed the limits, I would sometimes end up with hung pods until I understood what needed adjusting. Leaving hung Fargate pods will add up to a lot of cash because the pricing assumes very short execution times. 
This command helps you terminate job pods without accidentally terminating the runner manager pods:\n\n`kubectl get pods --all-namespaces --no-headers |  awk '{if ($2 ~ \"_YOUR_JOB_POD_PREFACE_*\") print $2}' | xargs kubectl -n _YOUR_RUNNER_NAMESPACE_ delete pod`\n\nDon't forget to replace \\_YOUR_RUNNER_NAMESPACE_ and \\_YOUR_JOB_POD_PREFACE_ “_YOUR_JOB_POD_PREFACE\\_” is the unique preface of ONLY the jobs from a given runner followed by the wildcard star character => \\*\n\nTo uninstall a runner, use:\n\n`helm delete --namespace gitlab-runner runner1`\n\n#### Testing Auto DevOps to prove `image:` tag is honored\n\nTechnically testing Auto DevOps to prove the `image:` tag is honored this isn’t entirely necessary since the above job loads the bash container without the container being specified in any of the runner or infrastructure setup. However, I performed this as a litmus test anyway.\n\nFollow these steps:\n\n1. Create a new project by clicking the “+” sign in the top bar of GitLab.\n2. On the next page, select “New Project/Repository”.\n3. Then “Create from template”.\n4. Select “Ruby on Rails” (first choice).\n5. Once the project creation is complete, register an EKS runner to it (or re-register the existing runner to the new project).\n6. In the project, select “Settings (Gear Icon)” => “CI/CD” => Auto DevOps => Default to Auto DevOps pipeline.\n7. Click “Save changes”.\n\nThe Auto DevOps pipeline should run. If you don’t have a cluster wired up, it will mainly do security scanning, which is sufficient to prove that arbitrary containers can be used by the Fargate-backed GitLab Runner.\n\n### 6. Solution tuning via extensible platform\n\nEKS Blueprints is not only product-managed, it is also an extensible platform or framework. In the spirit of fully leveraging the extensible product managed EKS Blueprints project, you will always want to check if Blueprints is already instrumented for your scenario before writing code. 
Additionally, if you must write code, you can consider contributing it as an EKS Blueprint extension so the community can take on some responsibility for maintaining it.\n\n1. The EKS Blueprints Managed IaC has a dizzying number of tuning parameters and optional extensions. For instance, if you want the full GitLab Runner logs collected to AWS CloudWatch, it is a simple configuration to add fluentd log agent to push custom logs to CloudWatch.\n2. Using Fargate for always-on containers is a trade-off of compute costs to get rid of Kubernetes node management overhead. This trade-off can be easily reversed in this example by removing the \"kube-system\" from \"fargate_profiles\" - since Karpenter is also installed and configured, the hosts will autoscale for load.\n\n### 7. Teardown\n\nThe next few instructions are from https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/karpenter/README.md#user-content-destroy.\n\nIf you are using AWS CloudShell and the /terraform directory no longer exists, perform these steps to re-prepare AWS CloudShell to perform teardown.\n\nIf you are not using AWS CloudShell, skip forward to “Teardown steps”.\n\n1. `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/add-all.sh -o $HOME/add-all.sh; chmod +x $HOME/add-all.sh; bash $HOME/add-all.sh`\n2. `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/prep-for-terraform.sh -o $HOME/prep-for-terraform.sh; chmod +x $HOME/prep-for-terraform.sh; bash $HOME/prep-for-terraform.sh`\n3. `git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git --no-checkout /terraform/terraform-aws-eks-blueprints` \n4. `cd /terraform/terraform-aws-eks-blueprints/`\n5. `git reset --hard tags/v4.29.0`\n6. 
`git clone https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate.git /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n\n   > **Note:** The above steps can be accomplished by running this: `s=prep-eksblueprint-karpenter.sh ; curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/${s} -o /tmp/${s}; chmod +x /tmp/${s}; bash /tmp/${s}` .\n\n7. `cd /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n8. `terraform init`\n\nFollow these teardown steps:\n\n1. `helm delete --namespace gitlab-runner runner1`\n2. `helm delete --namespace gitlab-runner runner2`\n3. `terraform destroy -target=\"module.eks_blueprints_kubernetes_addons\" -auto-approve -state=$HOME/tfstate/runner.tfstate`\n4. `terraform destroy -target=\"module.eks\" -auto-approve -state=$HOME/tfstate/runner.tfstate`\n5. **Note:** If you receive an error about refreshing cached credentials, simply re-run the command again and it will usually update successfully.\n6. `terraform destroy -auto-approve -state=$HOME/tfstate/runner.tfstate`\n\n### Iteration _n_ : We would love your input\n\nThis blog is \"Iteration 1\" precisely because it has not been production load-tested nor specifically cost-engineered. And obviously a “Hello, World” script is not testing much in the way of real work. I really set out to understand if we could run arbitrary containers in a GitLab Fargate setup (and we can) and then got curious about what parallel job scaling might look like with Fargate (and it looks good). The Kubernetes Runner executor has many, many available customizations and it is likely that scaling a production loaded implementation on EKS will reveal the need to tune more of these parameters. 
\n\n#### **Collaborative contribution challenges**\n\nHere are some ideas for further collaborative work on this project:\n\n- To push the limits, create a configuration that can scale to 1000 simultaneous jobs.\n- An aws-logging config map that uploads runner pod logs to AWS CloudWatch.\n- A cluster configuration where runner managers and everything that is not a runner job run on non-Fargate nodes – if and only if it will be cheaper than Fargate running 24 x 7.\n- A Fargate Spot configuration. It’s important that compute type be noted as a runner tag and it’s important that the same cluster has non-spot instances because some jobs should not run on spot compute and the decision whether to do so should be available to the GitLab CI Developer who is creating a pipeline.\n\n#### Other runner scaling initiatives\n\nWhile GitLab is building the Next Runner Auto-scaling Architecture, [Kubernetes refinements are not a part of this architectural initiative](https://docs.gitlab.com/ee/architecture/blueprints/runner_scaling/#proposal).\n\n#### Everyone can contribute\n\nThis tutorial, as well as code for additional examples, will be maintained as open source as a GitLab Alliances Solution and we’d love to have your contributions as you iterate and discover the configurations necessary for your real-world scenarios. This tutorial is in a group wiki and the code will be in the projects under that group here: [AWS Guided Explorations for EKS Runner Configurations](https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate/-/blob/main/README.md). 
\n\nPhoto by [Jeremy Lapak](https://unsplash.com/@jeremy_justin?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/runner?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,1984,1243],{"slug":2746,"featured":6,"template":686},"eks-fargate-runner","content:en-us:blog:eks-fargate-runner.yml","Eks Fargate Runner","en-us/blog/eks-fargate-runner.yml","en-us/blog/eks-fargate-runner",{"_path":2752,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2753,"content":2759,"config":2764,"_id":2766,"_type":14,"title":2767,"_source":16,"_file":2768,"_stem":2769,"_extension":19},"/en-us/blog/elite-team-strategies-to-secure-software-supply-chains",{"title":2754,"description":2755,"ogTitle":2754,"ogDescription":2755,"noIndex":6,"ogImage":2756,"ogUrl":2757,"ogSiteName":670,"ogType":671,"canonicalUrls":2757,"schema":2758},"How elite DevOps teams secure the software supply chain","The time is now to integrate security into your DevOps processes - your business will be better for it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667466/Blog/Hero%20Images/GitLab-Sec.png","https://about.gitlab.com/blog/elite-team-strategies-to-secure-software-supply-chains","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How elite DevOps teams secure the software supply chain\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-01-06\",\n      }",{"title":2754,"description":2755,"authors":2760,"heroImage":2756,"date":2761,"body":2762,"category":875,"tags":2763},[1454],"2022-01-06","\nIn 2022, the question is not if DevOps teams will integrate security into the software supply chain, but when and how quickly. The high-profile supply chain attacks of 2020 and 2021 have forced organizations to do more to protect themselves and their customers. 
Every DevOps team should strive to be an elite team in this area, aka [DevSecOps](/topics/devsecops/), as doing less will leave your software supply chains vulnerable.\n\nWhile many organizations might have been hesitant to blend security and DevOps over fears of how it would impact deployment schedules and performance, Google Cloud’s DevOps Research and Assessment (DORA) team concluded in its [“Accelerate State of DevOps 2021 Report”](https://services.google.com/fh/files/misc/state-of-devops-2021.pdf) that “development teams that embrace security see significant value driven to the business.”\n\nTeams that integrate security practices throughout their development process are 1.6 times more likely to meet or exceed their organizational goals, according to the report, which is co-sponsored by GitLab. Meantime, elite performers that met or exceeded their reliability targets were twice as likely to have security integrated into their development process.\n\nTo get to this elite level, though, security has to be baked into DevOps processes at the earliest stages. DevOps and security teams need to collaborate to ensure that they understand one another’s goals and speak the same technical language so they can develop DevSecOps best practices that effectively and efficiently satisfy those goals.\n\nOur newly released [“Guide to Software Supply Chain Security”](https://learn.gitlab.com/devsecops-aware/software-supply-chain-security-ebook) explains the urgency of protecting the supply chain now – no one wants a repeat of the SolarWinds or Colonial Pipeline attacks – and how the U.S. 
government will soon require many organizations to do so.\n\nWe help DevOps teams frame what it means to be elite, including moving beyond basic protections (using strong passwords, applying software patches in a timely manner, and implementing multi-factor authentication) to deploying these best practices:\n\n* Apply common controls for security and compliance\n* Automate common controls and CI/CD\n* Apply zero-trust principles\n* Inventory all tools and access, including infrastructure as code\n* Consider unconventional scale to find unconventional vulnerabilities\n* Secure containers and orchestrators\n\nThe guide also explains in detail the types of security scans that bolster supply chain security, including container scanning, dependency scanning, fuzz testing, dynamic application security testing (DAST) and static application security testing (SAST), license compliance, and secret detection.\n\nFor those unsure where they fall on the spectrum of supply chain security readiness, we’ve developed a two-minute quiz that examines how you handle the security of APIs, dependencies, and other critical areas.  
Use your ranking to plot your transformation to an elite team.\n\nAs the DORA report showed, there is room for improvement across the industry as fewer than two-thirds of DevOps teams are doing these simple security practices:\n\n* 63% invite InfoSec teams early and often\n* 60% perform security reviews\n* 58% test for security\n* 54% integrate security reviews into every phase\n* 49% build pre-approved code\n\nThere is little doubt that 2022 will have more high-profile supply chain attacks, but our guide can help you develop DevOps security processes that will protect your organization and your customers.\n\n## Read more on elite teams and supply chain security here:\n- [How to make your DevOps team elite performers](/blog/how-to-make-your-devops-team-elite-performers/)\n- [How a DevOps Platform helps protect against supply chain attacks](https://about.gitlab.com/blog/devops-platform-supply-chain-attacks/)\n- [DevSecOps FAQ: Get up to speed](https://about.gitlab.com/blog/devsecops-faq-get-up-to-speed-on-this-hot-devops-area/)\n",[9,875,683],{"slug":2765,"featured":6,"template":686},"elite-team-strategies-to-secure-software-supply-chains","content:en-us:blog:elite-team-strategies-to-secure-software-supply-chains.yml","Elite Team Strategies To Secure Software Supply Chains","en-us/blog/elite-team-strategies-to-secure-software-supply-chains.yml","en-us/blog/elite-team-strategies-to-secure-software-supply-chains",{"_path":2771,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2772,"content":2778,"config":2786,"_id":2788,"_type":14,"title":2789,"_source":16,"_file":2790,"_stem":2791,"_extension":19},"/en-us/blog/enable-slos-as-code",{"title":2773,"description":2774,"ogTitle":2773,"ogDescription":2774,"noIndex":6,"ogImage":2775,"ogUrl":2776,"ogSiteName":670,"ogType":671,"canonicalUrls":2776,"schema":2777},"Enable SLO-as-Code with Nobl9 and GitLab","Learn how to take advantage of a streamlined SLO process and maintain a single source of truth for SLO definitions 
within your DevOps platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669455/Blog/Hero%20Images/nobl9_1.jpg","https://about.gitlab.com/blog/enable-slos-as-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Enable SLO-as-Code with Nobl9 and GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Quan To\"},{\"@type\":\"Person\",\"name\":\"Jeremy Cooper\"},{\"@type\":\"Person\",\"name\":\"Ian Bartholomew\"}],\n        \"datePublished\": \"2022-05-09\",\n      }",{"title":2773,"description":2774,"authors":2779,"heroImage":2775,"date":2783,"body":2784,"category":726,"tags":2785},[2780,2781,2782],"Quan To","Jeremy Cooper","Ian Bartholomew","2022-05-09","\n\nNobl9 recently integrated with GitLab's CI to enable a consistent mechanism to publish Service Level Objectives (SLO) definitions from GitLab to Nobl9. With this SLO-as-Code integration, DevOps teams can take action when their error budgets are burning too fast or are about to be exhausted.\n\nIn today’s systems, 100% uptime isn’t realistic given the complex architectures and dependencies involved. SLOs enable you to define targets and have an error budget for tracking what's “good enough.” For example, you can target uptime of 99.9%, 99%, or even 95% because what truly matters is how much downtime or errors are acceptable before there is real customer impact.\n\nTypically when organizations think about SLO-as-Code, they must use separate products to ensure their SLO definitions are always in sync with whatever tool they are using. This usually includes running command-line tools manually or building custom integrations within their code repositories.    \n\nWith this CI configuration, every time you build your repo, GitLab will call [sloctl](https://docs.nobl9.com/sloctl-user-guide), our command-line tool, and push the SLO definition to Nobl9. 
Customers can continue using GitLab to version their SLO definitions and keep their SLOs consistent. This ensures your SLO definition will always be up to date with what’s in Nobl9 and removes any discrepancies over what the latest SLO definition actually is. SREs, engineers, and anyone using the SLOs can still debate what the targets need to be, but there will always be a definitive source of truth in your code repository on what the current definition is.\n\n## Getting started\n\nTo set this up in GitLab, follow these steps:\n\n**1.** Select Settings -> CI/CD, and click the Expand button next to Variables. \n\n![CICD_settings](https://about.gitlab.com/images/blogimages/nobl9_2.png)\n\n\n**2.** Add the following variables:\n\n- CLIENT_ID\n\n- CLIENT_SECRET\n\n- ACCESS_TOKEN\n\n- PROJECT \n\n- SLOCTL_YML\n\n\n**Note:** If you haven’t done so already, you’ll need to install sloctl. You can install the executable on your local machine by following the instructions in the [user guide](https://docs.nobl9.com/sloctl-user-guide#setting-up-sloctl). Once sloctl is installed, you can run the following command to retrieve your CLIENT_ID, CLIENT_SECRET, and ACCESS_TOKEN:\n\n\n    cat ~/.config/nobl9/config.toml\n\n\n    The PROJECT value is the name of the project inside Nobl9 that your SLO belongs \n    to.\n\n\n    The SLOCTL_YML value is the Nobl9 YAML file you want to push to Nobl9 on each \n    change.\n\n\n\n![install_sloctl](https://about.gitlab.com/images/blogimages/nobl9_3.png)\n\n\n\n**3.** Create the CI/CD job to apply the YAML, by going to CI/CD -> Jobs and clicking “Create CI/CD configuration file”. 
\n\n\n\n![create_config](https://about.gitlab.com/images/blogimages/nobl9_4.png)\n\n\n\nEnter the following code in the _.gitlab.ci.yml_ file:\n\n\n        variables:\n\n\n          CLIENT_ID: $NOBL9_CLIENT_ID\n\n\n          CLIENT_SECRET: $NOBL9_CLIENT_SECRET\n\n\n          ACCESS_TOKEN: $NOBL9_ACCESS_TOKEN\n\n\n          PROJECT: $NOBL9_PROJECT\n\n\n          SLOCTL_YML: $SLOCTL_YML\n\n\n        include:\n\n\n          - project: 'nobl9/nobl9-ci-template'\n\n\n            ref: main\n\n\n            file: '/nobl9.gitlab-ci.yml'\n\n\n\n\n**4.** Kick off a build. Any changes to the SLOCTL_YML file that you reference will now automatically be pushed to Nobl9 once the updates are committed.\n\nBy partnering with GitLab and providing a convenient CI script and a command-line tool for managing SLOs, Nobl9 has truly enabled SLO-as-Code. We encourage existing Nobl9 customers who use GitLab to give it a try. \n\nIf you haven’t experienced Nobl9 yet, you can sign up for a free 30-day trial at [nobl9.com/signup](http://nobl9.com/signup) to see all that it has to offer.\n\n_Quan To is Senior Director of Product Management, Jeremy Cooper is Senior Solutions Engineer, and Ian Bartholomew is SRE Manager at Nobl9._\n\nCover image by [Vardan Papikayan](https://unsplash.com/@varpap) on [Unsplash](https://unsplash.com/photos/JzE1dHEaAew)\n",[231,976,9],{"slug":2787,"featured":6,"template":686},"enable-slos-as-code","content:en-us:blog:enable-slos-as-code.yml","Enable Slos As Code","en-us/blog/enable-slos-as-code.yml","en-us/blog/enable-slos-as-code",{"_path":2793,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2794,"content":2800,"config":2805,"_id":2807,"_type":14,"title":2808,"_source":16,"_file":2809,"_stem":2810,"_extension":19},"/en-us/blog/enables-rapid-innovation",{"title":2795,"description":2796,"ogTitle":2795,"ogDescription":2796,"noIndex":6,"ogImage":2797,"ogUrl":2798,"ogSiteName":670,"ogType":671,"canonicalUrls":2798,"schema":2799},"GitLab uniquely enables rapid 
innovation","Learn about some of the ways GitLab can uniquely enable your developers to innovate more rapidly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681603/Blog/Hero%20Images/rapids-cover-1275x750.jpg","https://about.gitlab.com/blog/enables-rapid-innovation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab uniquely enables rapid innovation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2020-09-30\",\n      }",{"title":2795,"description":2796,"authors":2801,"heroImage":2797,"date":2802,"body":2803,"category":1359,"tags":2804},[1727],"2020-09-30","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nA challenge that organizations often face is the amount of time spent maintaining their IT systems vs. the time spent innovating and developing differentiating features for customers.  This challenge has become even more difficult during a global pandemic where working from home makes it harder to engage with your customers in person and digital channels have become the primary vehicle to do business with consumers of your services and products. Rapid innovation means your organization and teams can deliver lovable features faster and get value into the hands of customers sooner. This is more urgent than ever before to remain competitive and ultimately survive in this new business reality, and requires your developers to spend more time creating and developing code rather than managing multiple disparate tools, environments, and processes.\n\nGitLab uniquely enables rapid innovation by simplifying the adoption of DevOps practices so that your developers can spend more time creating innovative features and applications that matter to your customers. 
\n\nWatch this video (~6 mins) to see these rapid innovation capabilities in action.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/MLrqJ1sxkjQ\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nThe following is a non-exhaustive list of ways that GitLab helps your teams to achieve rapid innovation.\n\n### Easy collaboration across multiple roles and asset types\n\nApplication creators and stakeholders within every organization come from many disciplines, often times each using their own file types to get work done. For example:\n* **Product Designers** typically work with the output of their design tools, which could be Figma or Sketch files, images, or graphs.\n* **Developers** mainly work with programming language source files (code).\n* **DevOps Engineers** might use Infrastructure-as-code files, like Terraform, CloudFormation, or Azure Resource Manager files\n* **Database Administrators** often use Data Definition Language (DDL), Data Manipulation Language (DML), and SQL scripts.\n\nWhereas other CI/CD solutions typically stick to one type of asset, with GitLab, stakeholders can easily collaborate and contribute using their preferred asset types as part of a single conversation across the whole software development lifecycle. Not only does this enrich the conversation between all stakeholders, but it speeds up the innovation process by lowering the barrier for cross team collaboration.\n\n![issue with design picture](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/issue-with-design-picture.png){: .shadow.medium.center.wrap-text}\n\n### Security and compliance\n\nSecurity and compliance is usually a top priority for CIOs and directly affects how code is developed throughout the end-to-end SDLC. 
It's critical to protect your IP and equally important for customers to take confidence in the fact that their sensitive data is safe and secure. Instead of putting together your own mechanisms to check security vulnerabilities, license compliance, dependency scanning, static and dynamic application security testing, performance, fuzz testing, among others, GitLab provides you with built-in templates to do all these from within your CI pipeline. All you have to do is include them in your pipeline and voila! By leveraging these templates you can more quickly focus on creating and innovating.\n\n![build and test pipeline](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/build-and-test-pipeline.png){: .shadow.medium.center.wrap-text}\n\n### Review Apps\n\nWouldn’t it be great if you could effortlessly enable all stakeholders to review the application changes BEFORE they are merged to the main branch? Instead of orchestrating and putting together a review environment and building, loading and executing the application to it for every update, you can leverage GitLab Review Apps capability, which streamlines the review process by automatically creating (and cleaning up) temporary review environments with every change. 
This lets developers focus on innovation instead of environment setup.\n\n![review pipeline](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/review-pipeline.png){: .shadow.medium.center.wrap-text}\n\n### Deep Kubernetes integration\n\nAnother way that GitLab uniquely enables you to innovate more rapidly is the deep integration to Kubernetes clusters, which not only includes the automatic creation of and deployment to K8s clusters, but also includes automatic cluster monitoring, per application metrics, and the one-click deployment and management of a variety of supplemental applications such as a Web Application Firewall, Cert-Manager, Prometheus, GitLab Runner, Crossplane, JupyterHub, Elastic Stack, Fluentd, Knative, and GitLab Container Network Policies.\n\nKubernetes clusters can be set up by developers at their project level or by admins at the group levels, enabling developers to take advantage of container-based development best practices without needing deep subject matter expertise. This allows developers to spend more of their time working on what matters: creating great product.\n\n![K8s apps](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/K8s-apps.png){: .shadow.medium.center.wrap-text}\n\n### Automatic environments management\n\nGitLab will automatically spin up and tear down environments as needed by the CI/CD pipeline. For example, GitLab automatically spins up pods for the review, staging and production environments. 
All this infrastructure automation removes the burden of having to manage infrastructure off of your shoulders so that you can spend more time developing and creating code faster.\n\n![environments](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/environments.png){: .shadow.medium.center.wrap-text}\n\n### Pipeline template creation\n\nOnce you create a pipeline based on the best practices for your organization, you can turn it into a pipeline template that your development teams can use. Other developers can reuse this new template in their projects so that they can get right to creating and innovating differentiating features and applications that matter to their consumers.\n\n![steps to create pipeline template](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/steps-create-pipeline-template.png){: .shadow.medium.center.wrap-text}\n\n### Auto DevOps\n\nIf you’d like to leverage a complete DevOps predefined CI/CD pipeline, which is based on best practices, why not use Auto DevOps? Auto DevOps allows you to automatically detect, build, test, deploy, and monitor your applications. Leveraging CI/CD best practices and tools, Auto DevOps aims to simplify the setup and execution of a mature and modern software development lifecycle. The Auto DevOps pipeline shifts work left to find and prevent defects as early as possible in the software delivery process. The pipeline then deploys the application to staging for verification and then to production in an incremental fashion. 
As you can see, Auto DevOps saves you from implementing your own pipeline so that you can spend more time innovating.\n\n![partial auto devops pipeline](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/partial-Auto-DevOps-pipeline.png){: .shadow.medium.center.wrap-text}\n\nThese are some of the ways GitLab uniquely enables you to innovate more rapidly by ensuring that everything is where you need it when you need it, empowering you to focus on creating and developing innovations, delivering solutions faster, putting new products and services more quickly in the hands of your customers and remaining competitive. And all within a single application.\n\nFor more videos and demos visit [Learn@GitLab](https://about.gitlab.com/learn/).\nTo learn more about how GitLab can help you innovate more rapidly visit [the GitLab website](https://about.gitlab.com)\n\nCover image by [Florian Bernhardt](https://unsplash.com/@floww?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/rapids?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[109,9,1731],{"slug":2806,"featured":6,"template":686},"enables-rapid-innovation","content:en-us:blog:enables-rapid-innovation.yml","Enables Rapid Innovation","en-us/blog/enables-rapid-innovation.yml","en-us/blog/enables-rapid-innovation",{"_path":2812,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2813,"content":2819,"config":2826,"_id":2828,"_type":14,"title":2829,"_source":16,"_file":2830,"_stem":2831,"_extension":19},"/en-us/blog/ensure-auto-devops-work-after-helm-stable-repo",{"title":2814,"description":2815,"ogTitle":2814,"ogDescription":2815,"noIndex":6,"ogImage":2816,"ogUrl":2817,"ogSiteName":670,"ogType":671,"canonicalUrls":2817,"schema":2818},"Adapting Auto DevOps & managed apps to Helm repo changes","The Helm stable repository will be removed this month. 
We explain how to keep Auto DevOps and GitLab Managed Apps working.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667182/Blog/Hero%20Images/maximilian-weisbecker-Esq0ovRY-Zs-unsplash.jpg","https://about.gitlab.com/blog/ensure-auto-devops-work-after-helm-stable-repo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to keep GitLab Auto DevOps and Managed Apps working after Helm stable repo is removed\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Thong Kuah\"}],\n        \"datePublished\": \"2020-11-09\",\n      }",{"title":2820,"description":2815,"authors":2821,"heroImage":2816,"date":2823,"body":2824,"category":726,"tags":2825},"How to keep GitLab Auto DevOps and Managed Apps working after Helm stable repo is removed",[2822],"Thong Kuah","2020-11-09","The Helm project announced that the Helm Stable repository will be [removed](https://www.cncf.io/blog/important-reminder-for-all-helm-users-stable-incubator-repos-are-deprecated-and-all-images-are-changing-location/) on November 13. This change impacts GitLab [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/index.html) and [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html).\n\n## How Auto DevOps is impacted\n\nRemoving the Helm stable repository affects Auto Deploy and Auto Review Apps stages of Auto DevOps. The deploy jobs from these stages will fail because they cannot fetch the Helm stable repository. GitLab has mitigated this in GitLab 13.6 by switching to a [Helm Stable Archive repository](https://gitlab.com/gitlab-org/cluster-integration/helm-stable-archive) maintained by GitLab.\n\nIn case Auto DevOps pipelines are failing because of this problem, you can:\n\n1. Upgrade to GitLab 13.6.0 when it is released, or\n1. If you are on GitLab 13.5.X, you can also upgrade to GitLab 13.5.3\n1. If you are on GitLab 13.4.X, you can also upgrade to GitLab 13.4.6\n1. 
Specify a newer version of the `auto-deploy-image` image, which contains the fix, in your `.gitlab-ci.yml` file:\n\n    ```\n    include:\n      - template: Auto-DevOps.gitlab-ci.yml\n\n    .auto-deploy:\n      image: \"registry.gitlab.com/gitlab-org/cluster-integration/auto-deploy-image:v1.0.7\"\n    ```\n\nNot all users will be affected by the change. Users who are not using Helm as part of Auto DevOps, for example, those that are not using Kubernetes (Auto Deploy to AWS targets) will not be impacted by the removal of the Helm stable repository.\n\n## How GitLab managed apps are impacted\n\nThe removal of the Helm stable repository affects installation of the Ingress, Fluentd, Prometheus, and Sentry apps. These apps will fail to install as the Helm stable repository is removed. For the following:\n\n[GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html): GitLab has mitigated this problem in [GitLab 13.5.0](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/44875) by switching to a [Helm Stable Archive repository](https://gitlab.com/gitlab-org/cluster-integration/helm-stable-archive) maintained by GitLab.\n\nThere are a few ways to fix app installation failures because the Helm stable repository was removed.\n\n1. Upgrade to GitLab 13.5.0 or later, or\n1. If you are on GitLab 13.4.X, you can also upgrade to GitLab 13.4.6.\n1. If you are on GitLab 13.3.X, you can also upgrade to GitLab 13.3.8.\n\nGitLab has mitigated the problem in [GitLab 13.6](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/45487) for users with [GitLab Managed Apps using CI/CD](https://docs.gitlab.com/ee/update/removals.html) by switching to a [Helm Stable Archive repository](https://gitlab.com/gitlab-org/cluster-integration/helm-stable-archive) maintained by GitLab.\n\nIn case GitLab Managed Apps CI/CD installation pipelines are failing because of this problem, you can:\n\n1. Upgrade to GitLab 13.6.0 when it is released, or\n1. 
Specify a newer version of the `cluster-applications` image, which contains the fix, in your `.gitlab-ci.yml` file:\n\n    ```\n    include:\n      - template: Managed-Cluster-Applications.gitlab-ci.yml\n\n    apply:\n      image: \"registry.gitlab.com/gitlab-org/cluster-integration/cluster-applications:v0.34.1\"\n    ```\n\nIf you are installing applications that were not hosted in the Helm stable repository such as GitLab Runner, these applications will not be affected.\n\n## Learn more about the project\n\n- [Epic for Helm chart deprecation](https://gitlab.com/groups/gitlab-org/-/epics/4695)\n- [Information on error alert](https://docs.gitlab.com/ee/topics/autodevops/#error-error-initializing-looks-like-httpskubernetes-chartsstoragegoogleapiscom-is-not-a-valid-chart-repository-or-cannot-be-reached)\n- [Information on Helm chart change from CNCF](https://www.cncf.io/blog/important-reminder-for-all-helm-users-stable-incubator-repos-are-deprecated-and-all-images-are-changing-location/)\n\nCover image by Maximilian Weisbecker on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,109,726],{"slug":2827,"featured":6,"template":686},"ensure-auto-devops-work-after-helm-stable-repo","content:en-us:blog:ensure-auto-devops-work-after-helm-stable-repo.yml","Ensure Auto Devops Work After Helm Stable Repo","en-us/blog/ensure-auto-devops-work-after-helm-stable-repo.yml","en-us/blog/ensure-auto-devops-work-after-helm-stable-repo",{"_path":2833,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2834,"content":2840,"config":2846,"_id":2848,"_type":14,"title":2849,"_source":16,"_file":2850,"_stem":2851,"_extension":19},"/en-us/blog/environment-friction-cycle",{"title":2835,"description":2836,"ogTitle":2835,"ogDescription":2836,"noIndex":6,"ogImage":2837,"ogUrl":2838,"ogSiteName":670,"ogType":671,"canonicalUrls":2838,"schema":2839},"How GitLab eliminates value stream friction in dev environments","It is important to have the complete picture of scaled effects in view when 
designing automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682507/Blog/Hero%20Images/sandeep-singh-3KbACriapqQ-unsplash.jpg","https://about.gitlab.com/blog/environment-friction-cycle","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab can eliminate the massive value stream friction of developer environment provisioning and cleanup\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-11-17\",\n      }",{"title":2841,"description":2836,"authors":2842,"heroImage":2837,"date":2843,"body":2844,"category":791,"tags":2845},"How GitLab can eliminate the massive value stream friction of developer environment provisioning and cleanup",[1239],"2022-11-17","\n\nA strong DevOps value stream drives developer empowerment as far left as possible. In GitLab, this is embodied in per-feature branch merge requests that are rich with automated code quality and defect information - including not only findings - but automated remediation capabilities and collaboration. Some defects and code quality issues can only be found by analyzing a running copy of the application, including DAST, IAST, fuzzing and many others. GitLab has built a fully automated, seamless developer environment lifecycle management approach right into the developer experience. In fact, it’s so seamlessly built-in, it can be easy to overlook how critical developer environment lifecycle management is. This article will highlight why and how GitLab adds value using developer environment automation. 
In addition, while GitLab provides out of the box developer environment lifecycle management for Kubernetes, this article demonstrates an approach and a working example of how to extend that capability to other common cloud-based application framework PaaS offerings.\n\n## Provisioning of development environments is generally a negative feedback loop\n\nIn a prior job, I worked on a DevOps transformation team that supported multiple massive shared development environments in AWS. They were accessible to more than 4,000 developers working to build more than 100 SaaS applications and utility stacks. In the journey to the AWS Cloud, each development team took ownership of the automation required to deploy their applications. Since developers were able to self-service, over time this solved the problem of development friction generated by waiting for environments to be provisioned for testing, feature experiments, integration experiments, etc. \n\nHowever, the other half of the problem then ballooned - environment sprawl - with an untold number of environments idling without management and without knowledge of when they could be torn down. Over time the development environment cost became a significant multiple of production costs. The cloud has solved problems with environment provisioning bottlenecks due to hardware acquisition and provisioning, but this can also inadvertently fuel the high costs of unmanaged sprawl. This problem understandably causes organizations to raise administrative barriers to new development environments.\n\nIn many organizations this becomes a vicious cycle - most especially if developer environments are operated by a different team, or worse, on an independent budget. Environment justification friction usually comes quickly after discovering the true cost of the current running environments. 
Developers then have to justify the need for new environment requests and they have to make the gravest of promises to disband the environment as soon as they are done. Another friction arises when a separate group is tasked with cost controls and environment provisioning and cleanup. This introduces friction in the form of administrative and work queueing delays. Coordination friction also crops up because an accurate understanding of exactly what is needed for an environment can be challenging to convey. When mistakes are made or key information is missing, developers must go back and forth on support requests to get the configuration completely correct.\n\n## Partial automation can worsen the problem\n\nThat’s the first half of the environment lifecycle, but as I mentioned, even if that is fully automated and under the control of developers, the other half of the feedback loop comes into play. When a given development environment has fulfilled its initial justification reason, the team does not want to destroy it because environments are so hard to justify and create. Then the sprawl starts and, of course, the barriers to new environments are raised even higher. This is a classic negative feedback loop.\n\nSystems theory shows us that sometimes there are just a few key factors in stopping or even reversing a negative feedback loop. Lets take this specific problem apart and talk about how GitLab solves for it.\n\n## Treat developer environments as a complete lifecycle\n\nIn the prior example it is evident that by leaving out the last stage of the environment lifecycle - retirement or tear down - we still end up with a negative feedback loop. Removing provisioning friction actually makes the problem worse if retirement friction is not also addressed at the same time. Solutions to this problem need to address the entire lifecycle to avoid impacting value stream velocity. 
Neglecting or avoiding the retirement stage of a lifecycle is a common problem across all types of systems. In contrast, by addressing the entire lifecycle we can transform it from being a negative feedback loop to a managed lifecycle.\n\n## The problems of who and when\n\nBuried inside the insidious friction loop are a couple key coordination problems we’ll call “Who and When.” Basically, \"Who\" should create environments and \"When\" should they be created to ensure reasonable cost optimization? Then again, _Who_ should cleanup environments and _When_ do you know that the environment is no longer needed with certainty? Even with highly collaborative teams working hard together for maximum business value, these questions present a difficulty that frequently results in environments running for a long time before they are used and after they are no longer needed. The knowledge of appropriate timing plays a critical role in gaining control over this source of friction.\n\n## The problem of non-immutable development environments\n\nFriction in environment lifecycle management creates a substantial knock-on problem associated with long-lived environments. Long-lived environments that are updated multiple times for various independent projects start to accumulate configuration rot; they become snowflakes with small changes that are left over from non-implemented experiments, software or configuration removals, and other irrelevant bits and pieces. Immutability is the practice of not doing “in place” updates to a computing element, but rather destroying it and replacing it with a fresh, built-from-scratch, element. Docker has made this concept very accepted and effective in production workloads, but development environments frequently do not have this attribute due to automating without the design constraint of immutability, so they are updated in-place for reuse by various initiatives. 
If the environment lifecycle is not fully automated, it is impossible to make them workable on a per-feature branch basis.\n\n## The problem of non-isolated development environments \n\nWhen environments are manually provisioned or when there is a lot of cost or administrative friction to setting them up, environment sharing becomes more commonplace. This creates sharing contention at many levels. Waiting to schedule into use an environment, pressure to complete work quickly so others can use the environment, and restrictions on the types of changes that can be made to shared environments are just some of the common sharing contention elements that arise. If environments can be isolated, then sharing contention friction evaporates. Pushing this to the extreme of a per-feature branch granularity brings many benefits, but is also difficult.\n\n## Effect on the development value stream\n\nThe effect that a friction-filled environment lifecycle has on the value stream can be immense - how many stories have you heard of projects waylaid for weeks or months while waiting on environment provisioning? What about defects shipped to production because a shared environment had left over configuration during testing? Frequently this friction is tolerated in the value stream because no one will argue that unlimited environment sprawl is an unwise use of company resources. We all turn off the lights in our home when we are no longer using a room and it is good business sense and good stewardship not to leave idle resources running at work.\n\nThe concept of good stewardship of planetary resources is actually becoming an architectural level priority in the technology sector. 
This is evidenced in AWS’ [introduction of the “Sustainability” pillar to the AWS Well Architected principles in 2021](https://aws.amazon.com/blogs/aws/sustainability-pillar-well-architected-framework/) and many other green initiatives in the technology sector.\n\nIt’s imperative that efforts to improve the development value stream consider whether developer environment management friction is hampering the breadth, depth and velocity of product management and software development.\n\n## Seamless and fully automated review environment lifecycle management\n\nWhat if this negative feedback loop could be stopped? What if new environments were seamless and automatically created right at the moment they were needed? What if developers were completely happy to immediately tear down an environment when they were done because it takes no justification nor effort on their part to create a new one at will?\n\nEnter GitLab Review Environments!\n\nGitLab review apps are created by the developer action of creating a new branch. No humans are involved as the environment is deployed while the developer is musing their first code changes on their branch.\n\nAs the developer pushes code updates the review apps are automatically updated with the changes and all quality checks and security scanning are run to ensure the developer understands that they introduced a vulnerability or quality defect. This is done within the shortest possible amount of time after the defect was introduced.\n\nWhen the developer merges their code, the review app is automatically torn down.\n\nThis seamless approach to developer environment provisioning and cleanup addresses enough of the critical factors in the negative feedback loop that it is effectively nullified.\n\nConsider:\n\n- Developer environment provisioning and cleanup are fully automated, transparent, developer-initiated activities. 
They do not consume people nor human process resources, which are always legions slower and more expensive than technology solutions.\n- Provisioning and cleanup timing are exactly synchronized with the developer’s need, preventing inefficiencies in idle time before or after environment usage.\n- They are immutable on a new branch basis - a new branch always creates a new environment from fresh copy of the latest code.\n- They are isolated - no sharing contention and no mixing of varying configuration.\n- They treat developer environments as a lifecycle.\n\nIt is so transparent that some developers may not even realize that their feature branch has an isolated environment associated with it.\n\n## Hard dollar costs are important and opportunity costs are paramount\n\nGitLab environments positively contribute to the value stream in two critical ways. First, the actual waste of idle machines is dramatically reduced. However, more importantly, all the human processes that end up being applied to managing that waste also disappear. Machines running in the cloud are only lost money. Inefficient use of people’s time carries a high dollar cost but it also carries a higher opportunity cost. There are so many value-generating activities people can do when their time is unencumbered by cost-control administration.\n\n## Multiplying the value stream contributions of developer review environments\n\nDeveloper environment friction is an industry-wide challenge and GitLab nearly eliminates the core problems of this feedback cycle. 
However, GitLab has also gone way beyond simply addressing this problem by creating a lot of additional value through seamless per-feature branch developer environments.\n\nHere is a visualization of where dynamic review environments plug into the overall GitLab developer workflow.\n\n![](https://about.gitlab.com/images/blogimages/environment-friction-lifecycle/gitlabenvironmentlifecycle.png)\n\n**Figure 1: Review environments with AWS Cloud Services**\n\nFigure 1 is showing GitLab’s full development cycle support with a little art of the possible thrown in around interfacing with AWS deployment services. The green dashed arrow indicates that GitLab deploys a review environment when the branch is first created. Since the green arrow is part of the developer's iteration loop, the green arrow is also depicting that review app updates are done on each code push. \n\nThe light purple box is showing that the iterative development and CI checks are all within the context of a merge request (MR), which provides a Single Pane of Glass (SPOG) for all quality checks, vulnerabilities and collaboration. Finally, when the merge is done, the review environment is cleaned up. The feature branch merge request is the furthest left that visibility and remediation can be shifted. GitLab’s shifting of this into the developer feature branch is what gives developers a semi-private opportunity to fix any quality or security findings with the specific code they have added or updated.\n\nOne other thing to note here is that when GitLab CD code is engineered to handle review environments, it is reused for all other preproduction and production environments. The set of AWS icons after the “Release” icon would be using the same deployment code. However, if the GitLab CD code is engineered only around deploying to a set of static environments, it is not automatically capable of review environments. 
Review environment support is a superset of static environment support.\n\n## Review environments enable a profound shift left of visibility and remediation\n\nAt GitLab “shift left” is not just about “problem visibility” but also about “full developer enablement to resolve problems” while in-context. GitLab merge requests provide critical elements that encourage developers to get into a habit of defect remediation:\n\n- **Context** - Defect and vulnerability reporting is only for code the developer changed in their branch and is tracked by the merge request (MR) for that branch.\n- **Responsibility** - Since MRs and branches are associated to an individual, it is evident to the developer (and the whole team) what defects were introduced or discovered by which developers.\n- **Timing** - Developers become aware of defects nearly as soon as they are introduced, not weeks or months after having integrated with other code. If they were working on a physical product, we can envision that all the parts are still on the assembly bench.\n- **Visibility - Appropriately Local, Then Appropriately Global** - Visibility of defects is context specific. While a developer has an open MR that is still a work in progress, they can be left alone to remedy accidentally-introduced defects with little concern from others because the visibility is local to the MR. However, once they seek approvals to merge their code, then the approval process for the MR will cause the visibility of any unresolved defects and vulnerabilities to come to the attention of everyone involved in the approval process. This ensures that oversight happens with just the right timing - not too early and not forgotten. This makes a large-scale contribution to human efficiency in the development value stream.\n- **Advisement** - As much as possible GitLab integrates tools and advice right into the feature branch MR context where the defects are visible. 
Developers are given full vulnerability details and can take just-in-time training on specific vulnerabilities. \n- **Automated Remediation** - Developers can choose to apply auto-remediations when they are available.\n- **Collaboration** - They can use MR comments and new issues to collaborate with team mates throughout the organization on resolving defects of all types.\n\nHaving seamless, effortless review environments at a per-feature branch granularity is a critical ingredient in GitLab’s ability to maximize the shift left of the above developer capabilities. This is most critical in the developer checks that require a running copy of application, which is provided by the review environments. These checks include things such as DAST, IAST, API fuzzing and accessibility testing. The industry is also continuing to multiply the types of defect scanners that require an actively running copy of the application.\n\n## Extending GitLab review environments to other cloud application framework PaaS\n\nSo you may be thinking, “I love GitLab review environments, but not all of our applications are targeting Kubernetes.” It is true that the out- of-the-box showcasing of GitLab review environments depends on Kubernetes. One of the key reasons for this is that Kubernetes provides an integrated declarative deployment capability known as deployment manifests. The environment isolation capability, known as namespaces, also provides a critical capability. GitLab wires these Kubernetes capabilities up to a few key pieces of GitLab CD to accomplish the magic of isolated, per-feature branch review environments.\n\nAs far as I know there is no formal or defacto industry term for what I’ll call “Cloud Application Framework PaaS.” Cloud-provided PaaS can be targeted at various “levels” of the problem of building applications. 
For instance, primitive components such as AWS ELB address the problem of application load balancing by providing a variety of virtual, cloud-scaling and secured appliances that you can use as a component of building an application. Another example is [AWS Cognito](https://aws.amazon.com/cognito/) to help with providing user login and profile services to an application build.\n\nHowever, there are also cloud PaaS offerings that seek to solve the entire problem of rapid application building and maintenance. These are services like AWS Amplify and AWS AppRunner. These services frequently knit together primitive PaaS components (such as described above) into a composite that attempts to accelerate the entire process of building applications. Frequently these PaaS also include special CLIs or other developer tools that attempt to abstract the creation, maintenance and deployment of an Infrastructure as Code layer. They also tend to be [GitOps](/topics/gitops/)-oriented by storing this IaC in the same repository as the application code, which enables full control over deployments via Git controls such as branches and merge requests.\n\nThis approach relieves developers of early stage applications from having to learn IaC or hire IaC operations professionals too early. Basically it allows avoidance of overly early optimization of onboarding IaC skills. If the application is indeed successful it is quite common to outgrow the integrated IaC support provided by these specialized PaaS, however, the evolution is very natural because the managed IaC can simply start to be developed by specialists.\n\nThe distinction of cloud application framework PaaS is important when understanding where GitLab can create compound value with Dynamic Review Environments. 
I will refer to this kind of PaaS as “Cloud Application Infrastructure PaaS” that tries to solve the entire “Building Applications Problem.”\n\nSo we have a bunch of GitLab interfaces and conventions for implementing seamless developer review environments and we have non-Kubernetes cloud application infrastructures that provide declarative deployment interfaces and we can indeed make them work together! Interestingly, it is all done in GitLab CI YAML, which means that once you see the art of the possible, you can start implementing dynamic review environment lifecycle management for many custom environment types with the existing GitLab features. \n\n## A working, non-Kubernetes example of dynamic review environments in action\n\n![](https://about.gitlab.com/images/blogimages/environment-friction-lifecycle/CloudFormationDeployAnimatedGif.gif)\n\n**Figure 2: Working CD example of review environments for AWS CloudFormation**\n\nFigure 2 shows the details of an actual non-Kubernetes working example called CloudFormation AutoDeploy With Dynamic Review Environments. This project enables any AWS CloudFormation template to be deployed. It specifically supports an isolated stack deployment whenever a review branch is created and then also destroys that environment when the branch is merged. \n\nHere are some of the key design constraints and best practices that allow it to support automated review environments:\n\n- **The code is implemented as an include.** Notice that the main [.gitlab-ci.yml](https://gitlab.com/guided-explorations/aws/cloudformation-deploy/-/blob/main/.gitlab-ci.yml) files have only variables applicable to this project and then the inclusion of Deploy-AWSCloudFormation.gitlab-ci.yml. This allows you to treat the CloudFormation integration as a managed process, shared include to be improved and updated. 
If the stress of backward compatibility of managing a shared dependency is too much, you can encourage developers to make a copy of this file to essentially version peg it with their project.\n\n- **Avoids Conflict with Auto DevOps CI Stage Names** - The [standard stages of Auto Devops are here](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml#L70). This constraint allows the auto deploy template to be leveraged. \n\n- **Creates and Sequences Custom Stages as Necessary** - For instance, you can see we’ve added a `create-changeset` stage and jobs.\n\n- The `deploy-review` job and its `environment:` section must have a very specific construction, let’s look at the important details:\n\n  ```\n    rules:\n      - if: '$CI_COMMIT_BRANCH == \"main\"'\n        when: never\n      - if: '$REVIEW_DISABLED'\n        when: never\n      - if: '($CI_COMMIT_TAG || $CI_COMMIT_BRANCH) && $REQUIRE_CHANGESET_APPROVALS == \"true\"'\n        when: manual\n      - if: '($CI_COMMIT_TAG || $CI_COMMIT_BRANCH) && $REQUIRE_CHANGESET_APPROVALS != \"true\"'\n    artifacts:\n      reports:\n        dotenv: envurl.env\n    environment:\n      name: review/$CI_COMMIT_REF_SLUG\n      url: $DYNAMIC_ENVIRONMENT_URL\n      on_stop: stop_review\n  ```\n\n  \n\n  - `rules:` are used to ensure this job only runs when we are not on the main branch. The main branch implements long lived stage and prod environments.\n  - `artifacts:reports:dotenv` allows variables populated during a CI job to become pipeline level variables. The most critical role this plays in this job is to allow the URL retrieved from CloudFormation Outputs to be populated into the variable DYNAMIC_ENVIRONMENT_URL. The file `envurl.env` would have at least the line `DYNAMIC_ENVIRONMENT_URL={url-from-cloudformation}` in it. 
You can see this in the job code as `echo \"DYNAMIC_ENVIRONMENT_URL=${STACK_ENV_URL}\" >> envurl.env`\n  - `environment:name:` is using the Auto Deploy convention of placing review apps under the review environments top level called `review`. The reference $CI_COMMIT_REF_SLUG ensures that the branch (or tag name) is used, but with all illegal characters removed. By your development convention, the Environment Name should become a part of the IaC constructs that ensure both uniqueness as well as identifiability by this pipeline. In GitLab's standard auto deploy for Kubernetes this is done by constructing a namespace that contains the name in this provided parameter. In CloudFormation we make it part of the Stack Name. The value here is exposed in the job as the variable ${ENVIRONMENT}.\n  - `environment:url:` it is not self-evident here that the variable DYNAMIC_ENVIRONMENT_URL was populated by the deployment job and added to the file `envurl.env` so that it would contain the right value at this time. This causes the GitLab “Environment” page to have a clickable link to visit the environment. It also is used by DAST and other live application scan engines to find and scan the isolated environment.\n  - `environment:on_stop:` in the deploy-review job is what maps to the `stop_review` named job. This is the magic sauce behind automatic environment deletion when a feature branch is merged. `stop_review` must be written with the correct commands to accomplish the teardown.\n\n## A reusable engineering pattern\n\nThis CloudFormation pattern serves as a higher-level pattern of how GitLab review environments can be adapted to any other cloud “Application Level PaaS.” This is a term I use to indicate a cloud PaaS that is abstracted highly enough that developers think of it as “a place to deploy applications.” Perhaps a good way to define it is to contrast it with PaaS that does not claim to serve as an entire application platform. 
Cloud-based load balancers are a good example of a PaaS that performs a utility function for applications but is not a place to build an entire cloud application. \n\n## Application PaaS for abstracting IaC concerns for developers\n\nGitLab auto deploy combines well with the cloud application framework PaaS that has a disposition toward developer productivity by reducing or eliminating IaC management required by developers. AWS Amplify has such productivity support in the form of a developer specific CLI which allows IaC to be authored and updated in the same Git repository where the application code is stored. Adding an entire scaling database PaaS is as simple as running a single CLI command.\n\nGenerally such Application PaaS not only generate and help maintain IaC through highly abstracted CLI or UI actions, they also contain a single `deploy` command which is easily combined with a GitLab Auto Deploy template for working with that particular Application PaaS.\n\n## Wrap up\n\nHopefully this article has helped you understand that:\n\n- GitLab already contains a super valuable feature that automates developer environment lifecycle management.\n- It is critical in addressing a key friction in the DevOps value chain.\n- It can be extended beyond Kubernetes to other cloud application framework PaaS offerings.\n\n\nPhoto by [Sandeep Singh](https://unsplash.com/@funjabi?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/friction?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[9,1984,1243],{"slug":2847,"featured":6,"template":686},"environment-friction-cycle","content:en-us:blog:environment-friction-cycle.yml","Environment Friction 
Cycle","en-us/blog/environment-friction-cycle.yml","en-us/blog/environment-friction-cycle",{"_path":2853,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2854,"content":2860,"config":2866,"_id":2868,"_type":14,"title":2869,"_source":16,"_file":2870,"_stem":2871,"_extension":19},"/en-us/blog/explain-this-code",{"title":2855,"description":2856,"ogTitle":2855,"ogDescription":2856,"noIndex":6,"ogImage":2857,"ogUrl":2858,"ogSiteName":670,"ogType":671,"canonicalUrls":2858,"schema":2859},"ML experiment: Explain this source code","Learn how GitLab is experimenting with ML-powered source code explanation features in this fourth installment of our ongoing AI/ML in DevSecOps series.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662840/Blog/Hero%20Images/ai-experiment-stars.png","https://about.gitlab.com/blog/explain-this-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Explain this source code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2023-04-06\",\n      }",{"title":2855,"description":2856,"authors":2861,"heroImage":2857,"date":2863,"body":2864,"category":1178,"tags":2865},[2862],"Taylor McCaslin","2023-04-06","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nDeciphering the source code of a new software project can be a daunting or at least time-consuming task. The code may be poorly documented, or it may be written in a programming language that is unfamiliar to the developer. 
Even if the developer is familiar with the programming language, the code may be complex and difficult to understand. But what if developers had a helpful tool to figure out very quickly what code was doing? With recent advancements in AI models, it's now possible to have code explained in natural language. \n\n## Explain this code with AI\nAt GitLab, we’re experimenting with AI-assisted code explanations. We want to enable software developers to quickly understand source code they encounter. Whether it's starting with a new project, contributing to a project in a language they're not fluent in, or just trying to understand historical code, we want to help developers get up to speed quickly.\n\nIn a rapid prototype, our own [Denys Mishunov](https://gitlab.com/mishunov), Staff Frontend Engineer, and [Michael Le](https://gitlab.com/mle), Senior Product Designer for our [Create::Source Code group](/handbook/product/categories/#source-code-group), leverage AI to power code explanations within [GitLab's repository source code file viewer](https://docs.gitlab.com/ee/user/project/repository/).\n\n\n![Prototype UX for Explain this Code](https://about.gitlab.com/images/blogimages/explain-this-code-hr.png){: .shadow}\n\nAbove, you can see an example of highlighting a selection of code and asking for a code explanation. Watch the full demo below. \n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/xzsFfFqvlnU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Iterating on AI/ML features\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. We're starting on the repository file viewer, and this prototype can be extended to anywhere you interact with code within GitLab, from [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/) to [snippets](https://docs.gitlab.com/ee/user/snippets.html), and beyond. 
\n\nThis experiment is just the start of the ways we’re looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI Assisted features. We’ll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas. \n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[9,1180,916,1181],{"slug":2867,"featured":6,"template":686},"explain-this-code","content:en-us:blog:explain-this-code.yml","Explain This Code","en-us/blog/explain-this-code.yml","en-us/blog/explain-this-code",{"_path":2873,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2874,"content":2880,"config":2885,"_id":2887,"_type":14,"title":2888,"_source":16,"_file":2889,"_stem":2890,"_extension":19},"/en-us/blog/fantastic-infrastructure-as-code-security-attacks-and-how-to-find-them",{"title":2875,"description":2876,"ogTitle":2875,"ogDescription":2876,"noIndex":6,"ogImage":2877,"ogUrl":2878,"ogSiteName":670,"ogType":671,"canonicalUrls":2878,"schema":2879},"Fantastic Infrastructure as Code security attacks and how to find them","Learn about possible attack scenarios in Infrastructure as Code and GitOps environments, evaluate tools and scanners with Terraform, Kubernetes, etc., and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667482/Blog/Hero%20Images/cover-image-unsplash.jpg","https://about.gitlab.com/blog/fantastic-infrastructure-as-code-security-attacks-and-how-to-find-them","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Fantastic Infrastructure as Code security attacks and how to find them\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2022-02-17\",\n      }",{"title":2875,"description":2876,"authors":2881,"heroImage":2877,"date":2882,"body":2883,"category":679,"tags":2884},[2473],"2022-02-17","\n[Infrastructure as Code](/topics/gitops/infrastructure-as-code/)(IaC) has eaten the world. It helps manage and provision computer resources automatically and avoids manual work or UI form workflows. 
Lifecycle management with IaC started with declarative and idempotent configuration, package, and tool installation. In the era of cloud providers, IaC tools additionally help abstract cloud provisioning. They can create defined resources automatically (network, storage, databases, etc.) and apply the configuration (DNS entries, firewall rules, etc.).\n\nLike everything else, it has its flaws. IaC workflows have shifted left in the development lifecycle, making it more efficient. Developers and DevOps engineers need to learn new tools and best practices. Mistakes may result in leaked credentials or supply chain attacks. Existing security assessment tools might not be able to detect these new vulnerabilities.\n\nIn this post, we will dive into these specific risks and focus on IaC management tools such as Terraform, cloud providers, and deployment platforms involving containers and Kubernetes.\n\nFor each scenario, we will look into threats, tools, integrations, and best practices to reduce risk.\n\nYou can read the blog post top-down or navigate into the chapters individually.\n\n- [Scan your own infrastructure - know what's important](#scan-your-infrastructure---know-what-is-important)\n    - [Thinking like an attacker](#thinking-like-an-attacker)\n- [Tools to detect Terraform vulnerabilities](#tools-to-detect-terraform-vulnerabilities)\n- [Develop more IaC scenarios](#develop-more-iac-scenarios)\n    - [Terraform Module Dependency Scans](#terraform-module-dependency-scans)\n    - [IaC Security Scanning for Containers](#iac-security-scanning-for-containers)\n    - [IaC Security Scanning with Kubernetes](#iac-security-scanning-with-kubernetes)\n- [Integrations into CI/CD and Merge Requests for Review](#integrations-into-cicd-and-merge-requests-for-review)\n    - [Reports in MRs as comment](#reports-in-mrs-as-comment)\n    - [MR Comments using GitLab IaC SAST reports as source](#mr-comments-using-gitlab-iac-sast-reports-as-source)\n- [What is the best integration 
strategy?](#what-is-the-best-integration-strategy)\n\n## Scan your infrastructure - know what is important\n\nStart with identifying the project/group responsible for managing the IaC tasks. An inventory search for specific IaC tools, file suffixes (Terraform uses `.tf`, for example), and languages can be helpful. The security scan tools discussed in this blog post will discover all supported types automatically. Once you have identified the projects, you can use one of the tools to run a scan and identify the detected possible vulnerabilities.\n\nThere might not be any scan results because your infrastructure is secure at this time. Though, your processes may require you to create documentation, runbooks, and action items for eventually discovered vulnerabilities in the future. Creating a forecast on possible scenarios to defend is hard, so let us change roles from the defender to the attacker for a moment. Which security vulnerabilities are out there to exploit as a malicious attacker? Maybe it is possible to create vulnerable scenarios and simulate the attacker role by running a security scan.\n\n### Thinking like an attacker\n\nThere can be noticeable potential vulnerabilities like plaintext passwords in the configuration. Other scenarios involve cases you would never think of or a chain of items causing a security issue.\n\nLet us create a scenario for an attacker by provisioning an S3 bucket in AWS with Terraform. We intend to store logs, database dumps, or credential vaults in this S3 bucket.\n\nThe following example creates the `aws_s3_bucket` resource in Terraform using the AWS provider.\n\n```hcl\n# Create the bucket\nresource \"aws_s3_bucket\" \"demobucket\" {\n  bucket = \"terraformdemobucket\"\n  acl = \"private\"\n}\n```\n\nAfter provisioning the S3 bucket for the first time, someone decided to make the S3 bucket accessible by default. The example below grants public access to the bucket using `aws_s3_bucket_public_access_block`. 
`block_public_acls` and `block_public_policy` are set to `false` to allow any public access.\n\n```\n# Grant bucket access: public\nresource \"aws_s3_bucket_public_access_block\" \"publicaccess\" {\n  bucket = aws_s3_bucket.demobucket.id\n  block_public_acls = false\n  block_public_policy = false\n}\n```\n\nThe S3 bucket is now publicly readable, and anyone who knows the URL or scans network ranges for open ports may find the S3 bucket and its data. Malicious actors can not only capture credentials but also may learn about your infrastructure, IP addresses, internal server FQDNs, etc. from the logs, backups, and database dumps being stored in the S3 bucket.\n\nWe need ways to mitigate and detect this security problem. The following sections describe the different tools you can use. The full Terraform code is located in [this project](https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning/-/tree/main/terraform/aws) and allows you to test all tools described in this blog post.\n\n## Tools to detect Terraform vulnerabilities\n\nIn the \"not worst case\" scenario, the Terraform code to manage your infrastructure is persisted at a central Git server and not hidden somewhere on a host or local desktop. Maybe you are using `terraform init, plan, apply` jobs in CI/CD pipelines already. Let us look into methods and tools that help detect the public S3 bucket vulnerability. Later, we will discuss CI/CD integrations and automating IaC security scanning.\n\nBefore we dive into the tools, make sure to clone the demo project locally to follow the examples yourself.\n\n```shell\n$ cd /tmp\n$ git clone https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning.git && cd  infrastructure-as-code-scanning/\n```\n\nThe tool installation steps in this blog post are illustrated with [Homebrew on macOS](https://brew.sh/). 
Please refer to the tools' documentation for alternative installation methods and supported platforms.\n\nYou can follow the tools for Terraform security scanning by reading top-down, or navigate into the tools sections directly:\n\n- [tfsec](#tfsec)\n- [kics](#kics)\n- [terrascan](#terrascan)\n- [semgrep](#semgrep)\n- [tflint](#tflint)\n\n### tfsec\n\n[tfsec](https://github.com/aquasecurity/tfsec) from Aqua Security can help detect Terraform vulnerabilities. There are [Docker images available](https://github.com/aquasecurity/tfsec#use-with-docker) to quickly test the scanner on the CLI, or binaries to [install tfsec](https://aquasecurity.github.io/tfsec/v1.1.4/getting-started/installation/). Run `tfsec` on the local project path `terraform/aws/` to get a list of vulnerabilities.\n\n```shell\n$ brew install tfsec\n$ tfsec terraform/aws/\n```\n\nThe default scan provides a table overview on the CLI, which may need additional filters. Inspect `tfsec --help` to get a list of all available [parameters](https://aquasecurity.github.io/tfsec/v1.1.4/getting-started/usage/) and try generating JSON and JUnit output files to process further.\n\n```shell\n$ tfsec terraform/aws --format json --out tfsec-report.json\n1 file(s) written: tfsec-report.json\n$ tfsec terraform/aws --format junit --out tfsec-junit.xml\n1 file(s) written: tfsec-junit.xml\n```\n\nThe full example is located in the [terraform/aws directory in this project](https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning/-/tree/main/terraform/aws).\n\n#### Parse tfsec JSON reports with jq\n\nIn an earlier blog post, we shared [how to detect the JSON data structures and filter with chained jq commands](/blog/devops-workflows-json-format-jq-ci-cd-lint/). 
The tfsec report is a good practice: Extract the `results` key, iterate through all array list items and filtered by `rule_service` being `s3`, and only print `severity`, `description` and `location.filename`.\n\n```shell\n$ jq \u003C tfsec-report.json | jq -c '.[\"results\"]' | jq -c '.[] | select (.rule_service == \"s3\") | [.severity, .description, .location.filename]'\n```\n\n![tfsec parser output example](https://about.gitlab.com/images/blogimages/iac-security-scanning/tfsec-json-jq-parser.png){: .shadow}\n\n### kics\n\n[kics](https://kics.io/) is another IaC scanner, providing support for many different tools (Ansible, Terraform, Kubernetes, Dockerfile, and cloud configuration APIs such as AWS CloudFormation, Azure Resource Manager, and Google Deployment Manager).\n\nLet's try it: [Install kics](https://docs.kics.io/latest/getting-started/) and run it on the vulnerable project. `--report-formats`, `--output-path` and `--output-name` allow you to create a JSON report which can be automatically parsed with additional tooling.\n\n```shell\n$ kics scan --path .\n$ kics scan --path . --report-formats json --output-path kics --output-name kics-report.json\n```\n\nParsing the JSON report from `kics` with jq works the same way as the tfsec example above. Inspect the data structure and nested object, and filter by AWS as `cloud_provider`. The `files` entry is an array of dictionaries, which turned out to be a little tricky to extract with an additional `(.files[] | .file_name )` to add:\n\n```\n$ jq \u003C kics/kics-report.json | jq -c '.[\"queries\"]' | jq -c '.[] | select (.cloud_provider == \"AWS\") | [.severity, .description, (.files[] | .file_name ) ]'\n```\n\n![kics json jq parser](https://about.gitlab.com/images/blogimages/iac-security-scanning/kics-json-jq-parser.png){: .shadow}\n\n`kics` returns different [exit codes](https://docs.kics.io/latest/results/#exit_status_code) based on the number of different severities found. 
`50` indicates `HIGH` severities and causes your CI/CD pipeline to fail.\n\n### checkov\n\n[Checkov](https://checkov.io) supports Terraform (for AWS, GCP, Azure and OCI), CloudFormation, ARM, Serverless framework, Helm charts, Kubernetes, and Docker.\n\n```shell\n$ brew install checkov\n$ checkov --directory .\n```\n\n### terrascan\n\n[Terrascan](https://runterrascan.io/docs/getting-started/) supports Terraform, and more [policies](https://runterrascan.io/docs/policies/) for cloud providers, Docker, and Kubernetes.\n\n```shell\n$ brew install terrascan\n$ terrascan scan .\n```\n\n### semgrep\n\nSemgrep is working on [Terraform support](https://semgrep.dev/docs/language-support/), currently in Beta. It also detects Dockerfile errors - for example invalid port ranges and multiple ranges, similar to kics.\n\n```shell\n$ brew install semgrep\n$ semgrep --config auto .\n```\n\n### tflint\n\n[tflint](https://github.com/terraform-linters/tflint) also is an alternative scanner.\n\n## Develop more IaC scenarios\n\nWhile testing IaC Security Scanners for the first time, I was looking for demo projects and examples. The [kics queries list for Terraform](https://docs.kics.io/latest/queries/terraform-queries/) provides an exhaustive list of all vulnerabilities and the documentation linked. From there, you can build and create potential attack vectors for demos and showcases without leaking your company code and workflows.\n\n[Terragoat](https://github.com/bridgecrewio/terragoat) also is a great learning resource to test various scanners and see real-life examples for vulnerabilities.\n\n```shell\n$ cd /tmp && git clone https://github.com/bridgecrewio/terragoat.git && cd terragoat\n\n$ tfsec .\n$ kics scan --path .\n$ checkov --directory .\n$ semgrep --config auto .\n$ terrascan scan .\n```\n\nIt is also important to verify the reported vulnerabilities and create documentation for required actions for your teams. 
Not all detected vulnerabilities are necessarily equally critical in your environment. With the rapid development of IaC, [GitOps](https://about.gitlab.com/topics/gitops/), and cloud-native environments, it can also be a good idea to use 2+ scanners to see if there are missing vulnerabilities on one or the other.\n\nThe following sections discuss more scenarios in detail.\n\n- [Terraform Module Dependency Scans](#terraform-module-dependency-scans)\n- [IaC Security Scanning for Containers](#iac-security-scanning-for-containers)\n- [IaC Security Scanning with Kubernetes](#iac-security-scanning-with-kubernetes)\n\n### Terraform Module Dependency Scans\n\nRe-usable IaC workflows also can introduce security vulnerabilities you are not aware of. [This project](https://gitlab.com/gitlab-de/use-cases/iac-tf-vuln-module) provides the module files and package in the registry, which can be consumed by `main.tf` in the demo project.\n\n```hcl\nmodule \"my_module_name\" {\n  source = \"gitlab.com/gitlab-de/iac-tf-vuln-module/aws\"\n  version = \"1.0.0\"\n}\n```\n\nkics has [limited support for the official Terraform module registry](https://docs.kics.io/latest/platforms/#terraform_modules), `checkov` failed to download private modules, `terrascan` and `tfsec` work when `terraform init` is run before the scan. Depending on your requirements, running `kics` for everything and `tfsec` for module dependency checks can be a solution, suggestion added [here](https://gitlab.com/groups/gitlab-org/-/epics/6653#note_840447132).\n\n### IaC Security Scanning for Containers\n\nSecurity problems in containers can lead to application deployment vulnerabilities. 
The [kics query database](https://docs.kics.io/latest/queries/dockerfile-queries/) helps to reverse engineer more vulnerable examples: Using the latest tag, privilege escalations with invoking sudo in a container, ports out of range, and multiple entrypoints are just a few bad practices.\n\nThe following [Dockerfile](https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning/-/blob/main/Dockerfile) implements example vulnerabilities for the scanners to detect:\n\n```\n# Create vulnerabilities based on kics queries in https://docs.kics.io/latest/queries/dockerfile-queries/\nFROM debian:latest\n\n# kics: Run Using Sudo\n# kics: Run Using apt\nRUN sudo apt install git\n\n# kics: UNIX Ports Out Of Range\nEXPOSE 99999\n\n# kics: Multiple ENTRYPOINT Instructions Listed\nENTRYPOINT [\"ex1\"]\nENTRYPOINT [\"ex2\"]\n```\n\nKics, tfsec, and terrascan can detect `Dockerfile` vulnerabilities, similar to semgrep and checkov. As an example scanner, terrascan can detect the vulnerabilities using the `--iac-type docker` parameter that allows to filter the scan type.\n\n```shell\n$ terrascan scan --iac-type docker\n```\n\n![terrascan Docker IaC type scan result](https://about.gitlab.com/images/blogimages/iac-security-scanning/terrascan-docker-iac.png){: .shadow}\n\nYou can run kics and tfsec as an exercise to verify the results.\n\n### IaC Security Scanning with Kubernetes\n\nSecuring a Kubernetes cluster can be a challenging task. Open Policy Agent, Kyverno, RBAC, etc., and many different YAML configuration attributes require reviews and automated checks before the production deployments. [Cluster image scanning](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html) is one way to mitigate security threats, next to [Container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/) for the applications being deployed. 
A suggested read is the book [“Hacking Kubernetes” book](https://www.oreilly.com/library/view/hacking-kubernetes/9781492081722/) by Andrew Martin and Michael Hausenblas if you want to dive deeper into Kubernetes security and attack vectors.\n\nIt's possible to make mistakes when, for example, copying YAML example configuration and continue using it. I've created a deployment and service for a [Kubernetes monitoring workshop](/handbook/marketing/developer-relations/developer-evangelism/projects/#practical-kubernetes-monitoring-with-prometheus), which provides a practical example to learn but also uses some not so good practices.\n\nThe following configuration in [ecc-demo-service.yml](https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning/-/blob/main/kubernetes/ecc-demo-service.yml) introduces vulnerabilities and potential production problems:\n\n```yaml\n---\n# A deployment for the ECC Prometheus demo service with 3 replicas.\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: ecc-demo-service\n  labels:\n    app: ecc-demo-service\nspec:\n  replicas: 3\n  selector:\n    matchLabels:\n      app: ecc-demo-service\n  template:\n    metadata:\n      labels:\n        app: ecc-demo-service\n    spec:\n      containers:\n      - name: ecc-demo-service\n        image: registry.gitlab.com/everyonecancontribute/observability/prometheus_demo_service:latest\n        imagePullPolicy: IfNotPresent\n        args:\n        - -listen-address=:80\n        ports:\n        - containerPort: 80\n---\n# A service that references the demo service deployment.\napiVersion: v1\nkind: Service\nmetadata:\n  name: ecc-demo-service\n  labels:\n    app: ecc-demo-service\nspec:\n  ports:\n  - port: 80\n    name: web\n  selector:\n    app: ecc-demo-service\n```\n\nLet's scan the Kubernetes manifest with kics and parse the results again with jq. 
A list of kics queries for Kubernetes can be found in the [kics documentation](https://docs.kics.io/latest/queries/kubernetes-queries/).\n\n```shell\n$ kics scan --path kubernetes --report-formats json --output-path kics --output-name kics-report.json\n\n$ jq \u003C kics/kics-report.json | jq -c '.[\"queries\"]' | jq -c '.[] | select (.platform == \"Kubernetes\") | [.severity, .description, (.files[] | .file_name ) ]'\n```\n\n![Kubernetes manifest scans and jq parser results with kics](https://about.gitlab.com/images/blogimages/iac-security-scanning/kics-kubernetes-jq-parser.png){: .shadow}\n\n[Checkov](https://www.checkov.io/) detects similar vulnerabilities with Kubernetes.\n\n```\n$ checkov --directory kubernetes/\n$ checkov --directory kubernetes -o json > checkov-report.json\n```\n\n[kube-linter](https://docs.kubelinter.io/#/?id=installing-kubelinter) analyzes Kubernetes YAML files and Helm charts for production readiness and security.\n\n```shell\n$ brew install kube-linter\n$ kube-linter lint kubernetes/ecc-demo-service.yml --format json > kube-linter-report.json\n```\n\n[kubesec](https://kubesec.io/) provides security risk analysis for Kubernetes resources. `kubesec` is also integrated into the [GitLab SAST scanners](https://docs.gitlab.com/ee/user/application_security/sast/#enabling-kubesec-analyzer).\n\n```shell\n$ docker run -i kubesec/kubesec:512c5e0 scan /dev/stdin \u003C kubernetes/ecc-demo-service.yml\n```\n\n## Integrations into CI/CD and Merge Requests for Review\n\nThere are many scanners out there, and most of them return the results in JSON which can be parsed and integrated into your CI/CD pipelines. You can learn more about the evaluation of GitLab IaC scanners in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/39695). The table in the issue includes licenses, languages, outputs, and examples.\n\n`checkov` and `tfsec` provide JUnit XML reports as output format, which can be parsed and integrated into CI/CD. 
Vulnerability reports will need a different format though to not confuse them with unit test results for example. Integrating a SAST scanner in GitLab requires you to provide [artifacts:reports:sast](https://docs.gitlab.com/ee/ci/yaml/artifacts_reports.html#artifactsreportssast) as a specified output format and API. [This report](https://docs.gitlab.com/ee/user/application_security/iac_scanning/#reports-json-format) can then be consumed by GitLab integrations such as MR widgets and vulnerability dashboards, available in the Ultimate tier. The following screenshot shows adding a Kubernetes deployment and service with potential vulnerabilities in [this MR](https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning/-/merge_requests/3).\n\n![MR widget showing IaC vulnerabilities with Kubernetes](https://about.gitlab.com/images/blogimages/iac-security-scanning/gitlab-iac-mr-widget-kubernetes.png){: .shadow}\n\n### Reports in MRs as comment\n\nThere are different ways to collect the JSON reports in your CI/CD pipelines or scheduled runs. One of the ideas can be creating a merge request comment with a Markdown table. It needs a bit more work with parsing the reports, formatting the comment text, and interacting with the GitLab REST API, shown in the following steps in a Python script. You can follow the implementation steps to re-create them in your preferred language for the scanner type and use [GitLab API clients](/partners/technology-partners/#api-clients).\n\nFirst, read the report in JSON format, and inspect whether `kics_version` is set to continue. 
Then extract the `queries` key, and prepare the `comment_body` with the markdown table header columns.\n\n```python\nFILE=\"kics/kics-report.json\"\n\nf = open(FILE)\nreport = json.load(f)\n\n# Parse the report: kics\nif \"kics_version\" in report:\n    print(\"Found kics '%s' in '%s'\" % (report[\"kics_version\"], FILE))\n    queries = report[\"queries\"]\nelse:\n    raise Exception(\"Unsupported report format\")\n\ncomment_body = \"\"\"### kics vulnerabilities report\n\n| Severity | Description | Platform | Filename |\n|----------|-------------|----------|----------|\n\"\"\"\n```\n\nNext, we need to parse all queries in a loop, and collect all column values. They are collected into a new list, which then gets joined with the `|` character. The `files` key needs a nested collection, as this is a list of dictionaries where only the `file_name` is of interest for the demo.\n\n```python\n# Example query to parse: {'query_name': 'Service Does Not Target Pod', 'query_id': '3ca03a61-3249-4c16-8427-6f8e47dda729', 'query_url': 'https://kubernetes.io/docs/concepts/services-networking/service/', 'severity': 'LOW', 'platform': 'Kubernetes', 'category': 'Insecure Configurations', 'description': 'Service should Target a Pod', 'description_id': 'e7c26645', 'files': [{'file_name': 'kubernetes/ecc-demo-service.yml', 'similarity_id': '9da6166956ad0fcfb1dd533df17852342dcbcca02ac559becaf51f6efdc015e8', 'line': 38, 'issue_type': 'IncorrectValue', 'search_key': 'metadata.name={{ecc-demo-service}}.spec.ports.name={{web}}.targetPort', 'search_line': 0, 'search_value': '', 'expected_value': 'metadata.name={{ecc-demo-service}}.spec.ports={{web}}.targetPort has a Pod Port', 'actual_value': 'metadata.name={{ecc-demo-service}}.spec.ports={{web}}.targetPort does not have a Pod Port'}]}\n\nfor q in queries:\n    #print(q) # DEBUG\n    l = []\n    l.append(q[\"severity\"])\n    l.append(q[\"description\"])\n    l.append(q[\"platform\"])\n\n    if \"files\" in q:\n        
l.append(\",\".join((f[\"file_name\"] for f in q[\"files\"])))\n\n    comment_body += \"| \" + \" | \".join(l) + \" |\\n\"\n\nf.close()\n```\n\nThe markdown table has been prepared, so now it is time to communicate with the GitLab API. [python-gitlab](https://python-gitlab.readthedocs.io/en/stable/api-usage.html) provides a great abstraction layer with programmatic interfaces.\n\nThe GitLab API needs a project/group access token with API permissions. The `CI_JOB_TOKEN` is not sufficient.\n\n![Set the Project Access Token as CI/CD variable, not protected](https://about.gitlab.com/images/blogimages/iac-security-scanning/gitlab-cicd-variable-project-access-token.png){: .shadow}\n\nRead the `GITLAB_TOKEN` from the environment, and instantiate a new `Gitlab` object.\n\n```python\nGITLAB_URL='https://gitlab.com'\n\nif 'GITLAB_TOKEN' in os.environ:\n    gl = gitlab.Gitlab(GITLAB_URL, private_token=os.environ['GITLAB_TOKEN'])\nelse:\n    raise Exception('GITLAB_TOKEN variable not set. Please provide an API token to update the MR!')\n```\n\nNext, use the `CI_PROJECT_ID` CI/CD variable from the environment to select the [project object](https://python-gitlab.readthedocs.io/en/stable/gl_objects/projects.html) which contains the merge request we want to target.\n\n```python\nproject = gl.projects.get(os.environ['CI_PROJECT_ID'])\n```\n\nThe tricky part is to fetch the [merge request](https://python-gitlab.readthedocs.io/en/stable/gl_objects/merge_requests.html) ID from the CI/CD pipeline, it is not always available. 
A workaround can be to read the `CI_COMMIT_REF_NAME` variable and match it against all MRs in the project, looking if the `source_branch` matches.\n\n```python\nreal_mr = None\n\nif 'CI_MERGE_REQUEST_ID' in os.environ:\n    mr_id = os.environ['CI_MERGE_REQUEST_ID']\n    real_mr = project.mergerequests.get(mr_id)\n\n# Note: This workaround can be very expensive in projects with many MRs\nif 'CI_COMMIT_REF_NAME' in os.environ:\n    commit_ref_name = os.environ['CI_COMMIT_REF_NAME']\n\n    mrs = project.mergerequests.list()\n\n    for mr in mrs:\n        if mr.source_branch in commit_ref_name:\n            real_mr = mr\n            # found the MR for this source branch\n            # print(mr) # DEBUG\n\nif not real_mr:\n    print(\"Pipeline not run in a merge request, no reports sent\")\n    sys.exit(0)\n```\n\nLast but not least, use the MR object to [create a new note](https://python-gitlab.readthedocs.io/en/stable/gl_objects/notes.html) with the `comment_body` including the Markdown table created before.\n\n```python\nmr_note = real_mr.notes.create({'body': comment_body})\n```\n\nThis workflow creates a new MR comment every time a new commit is pushed. Consider evaluating the script and refining the update frequency by yourself. 
The script can be integrated into CI/CD with running kics before generating the reports shown in the following example configuration for `.gitlab-ci.yml`:\n\n```yaml\n# Full RAW example for kics reports and scans\nkics-scan:\n  image: python:3.10.2-slim-bullseye\n  variables:\n    # Visit for new releases\n    # https://github.com/Checkmarx/kics/releases\n    KICS_VERSION: \"1.5.1\"\n  script:\n    - echo $CI_PIPELINE_SOURCE\n    - echo $CI_COMMIT_REF_NAME\n    - echo $CI_MERGE_REQUEST_ID\n    - echo $CI_MERGE_REQUEST_IID\n    - apt-get update && apt-get install wget tar --no-install-recommends\n    - set -ex; wget -q -c \"https://github.com/Checkmarx/kics/releases/download/v${KICS_VERSION}/kics_${KICS_VERSION}_linux_x64.tar.gz\" -O - | tar -xz --directory /usr/bin &>/dev/null\n    # local requirements\n    - pip install -r requirements.txt\n    - kics scan --no-progress -q /usr/bin/assets/queries -p $(pwd) -o $(pwd) --report-formats json --output-path kics --output-name kics-report.json || true\n    - python ./integrations/kics-scan-report-mr-update.py\n```\n\nYou can find the [.gitlab-ci.yml configuration](https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning/-/blob/main/.gitlab-ci.yml) and the full script, including more inline comments and debug output [in this project](https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning). You can see the implementation MR testing itself in [this comment](https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning/-/merge_requests/4#note_840472146).\n\n![MR comment with the kics report as Markdown table](https://about.gitlab.com/images/blogimages/iac-security-scanning/kics-python-gitlab-mr-update-table.png){: .shadow}\n\n### MR comments using GitLab IaC SAST reports as source\n\nThe steps in the previous section show the raw `kics` command execution, including JSON report parsing that requires you to create your own parsing logic. 
Alternatively, you can rely on the [IaC scanner in GitLab](https://docs.gitlab.com/ee/user/application_security/iac_scanning/#making-iac-analyzers-available-to-all-gitlab-tiers) and parse the SAST JSON report as [a standardized format](https://docs.gitlab.com/ee/user/application_security/iac_scanning/#reports-json-format). This is available for all GitLab tiers.\n\nDownload the [gl-sast-report.json example](https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning/-/blob/main/example-reports/gl-sast-report-kics-iac.json), save it as `gl-sast-report.json` in the same directory as the script, and parse the report in a similar way shown above.\n\n```python\nFILE=\"gl-sast-report.json\"\n\nf = open(FILE)\nreport = json.load(f)\n\n# Parse the report: kics\nif \"scan\" in report:\n    print(\"Found scanner '%s' in '%s'\" % (report[\"scan\"][\"scanner\"][\"name\"], FILE))\n    queries = report[\"vulnerabilities\"]\nelse:\n    raise Exception(\"Unsupported report format\")\n```\n\nThe parameters in the vulnerability report also include the CVE number. The `location` is using a nested dictionary and thus easier to parse.\n\n```python\ncomment_body = \"\"\"### IaC SAST vulnerabilities report\n\n| Severity | Description | Category | Location | CVE |\n|----------|-------------|----------|----------|-----|\n\"\"\"\n\nfor q in queries:\n    #print(q) # DEBUG\n    l = []\n    l.append(q[\"severity\"])\n    l.append(q[\"description\"])\n    l.append(q[\"category\"])\n    l.append(q[\"location\"][\"file\"])\n    l.append(q[\"cve\"])\n\n    comment_body += \"| \" + \" | \".join(l) + \" |\\n\"\n\nf.close()\n```\n\nThe `comment_body` contains the Markdown table, and can use the same code to update the MR with a comment using the GitLab API Python bindings. 
An example run is shown in [this MR comment](https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning/-/merge_requests/8#note_841940319).\n\nYou can integrate the script into your CI/CD workflows using the following steps: 1) Override the `kics-iac-sast` job `artifacts` created by the `Security/SAST-IaC.latest.gitlab-ci.yml` template and 2) Add a job `iac-sast-parse` which parses the JSON report and calls the script to send a MR comment.\n\n```yaml\n# GitLab integration with SAST reports spec\ninclude:\n- template: Security/SAST-IaC.latest.gitlab-ci.yml\n\n# Override the SAST report artifacts\nkics-iac-sast:\n  artifacts:\n    name: sast\n    paths:\n      - gl-sast-report.json\n    reports:\n      sast: gl-sast-report.json\n\niac-sast-parse:\n  image: python:3.10.2-slim-bullseye\n  needs: ['kics-iac-sast']\n  script:\n    - echo \"Parsing gl-sast-report.json\"\n    - pip install -r requirements.txt\n    - python ./integrations/sast-iac-report-mr-update.py\n  artifacts:\n      paths:\n      - gl-sast-report.json\n```\n\nThe CI/CD pipeline testing itself can be found in [this MR comment](https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning/-/merge_requests/9#note_841976761). Please review the [sast-iac-report-mr-update.py](https://gitlab.com/gitlab-de/use-cases/infrastructure-as-code-scanning/-/blob/main/integrations/sast-iac-report-mr-update.py) script and evaluate whether it is useful for your workflows.\n\n## What is the best integration strategy?\n\nOne way to evaluate the scanners is to look at their extensibility. For example, [kics](https://docs.kics.io/latest/creating-queries/) calls them `queries`, [semgrep](https://semgrep.dev/docs/writing-rules/overview/) uses `rules`, [checkov](https://www.checkov.io/3.Custom%20Policies/Custom%20Policies%20Overview.html) says `policies`, [tfsec](https://aquasecurity.github.io/tfsec/v1.1.5/getting-started/configuration/custom-checks/) goes for `custom checks` as a name. 
These specifications allow you to create and contribute your own detection methods with extensive tutorial guides.\n\nMany of the shown scanners provide container images to use, or CI/CD integration documentation. Make sure to include this requirement in your evaluation. For a fully integrated and tested solution, use the [IaC Security Scanning feature in GitLab](https://docs.gitlab.com/ee/user/application_security/iac_scanning/), currently based on the `kics` scanner. If you already have experience with other scanners, or prefer your own custom integration, evaluate the alternatives for your solution. All scanners discussed in this blog post provide JSON as output format, which helps with programmatic parsing and automation.\n\nMaybe you'd like to [contribute a new IaC scanner](https://docs.gitlab.com/ee/user/application_security/iac_scanning/#contribute-your-scanner) or help improve the detection rules and functionality from the open source scanners :-)\n\nCover image by [Sawyer Bengtson](https://unsplash.com/photos/tnv84LOjes4) on [Unsplash](https://unsplash.com)\n{: .note}\n",[875,1477,9],{"slug":2886,"featured":6,"template":686},"fantastic-infrastructure-as-code-security-attacks-and-how-to-find-them","content:en-us:blog:fantastic-infrastructure-as-code-security-attacks-and-how-to-find-them.yml","Fantastic Infrastructure As Code Security Attacks And How To Find 
Them","en-us/blog/fantastic-infrastructure-as-code-security-attacks-and-how-to-find-them.yml","en-us/blog/fantastic-infrastructure-as-code-security-attacks-and-how-to-find-them",{"_path":2892,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2893,"content":2899,"config":2904,"_id":2906,"_type":14,"title":2907,"_source":16,"_file":2908,"_stem":2909,"_extension":19},"/en-us/blog/first-time-open-source-contributor-5-things-to-get-you-started",{"title":2894,"description":2895,"ogTitle":2894,"ogDescription":2895,"noIndex":6,"ogImage":2896,"ogUrl":2897,"ogSiteName":670,"ogType":671,"canonicalUrls":2897,"schema":2898},"First time open source contributor? 5 things to get you started","Open source really is *open* but it can be difficult to know where (and how) to jump in. Here's our best advice.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671390/Blog/Hero%20Images/developers-choose-open-source.jpg","https://about.gitlab.com/blog/first-time-open-source-contributor-5-things-to-get-you-started","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"First time open source contributor? 5 things to get you started\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-02-07\",\n      }",{"title":2894,"description":2895,"authors":2900,"heroImage":2896,"date":2901,"body":2902,"category":679,"tags":2903},[745],"2022-02-07","If you haven’t yet contributed to an open source software project, you may be eager to get going. Contributing to open source is a [great way to learn, teach, and build your technical expertise](https://clearcode.cc/blog/why-developers-contribute-open-source-software/). And it feels good to be part of a community. Yet your first time contributing can be intimidating. Here are five things you need to know to get up and running on open source:\n\n1. Contributing isn’t just about writing code. 
Open source projects need help on a variety of things, starting with coding, but also things like designing navigation and menus, writing documentation, managing timelines, organizing open issues, moderating message boards and answering questions. [Other ways to get started/](https://www.hanselman.com/blog/get-involved-in-open-source-today-how-to-contribute-a-patch-to-a-github-hosted-open-source-project-like-code-52) File a bug and suggest a patch for it or suggest a feature. In short, [there are many ways to contribute](https://opensource.guide/how-to-contribute/#why-contribute-to-open-source), in line with your interests and expertise. And no matter what you give, you’ll meet people and become an appreciated member of the group – sometimes contributing on ancillary things will earn you more points than coding.  \n\n2. Confusion is ok. If you’re bewildered at first, it’s not just because you’re a newbie. Each open source project has its own culture, [including terms of art, behavior norms, accepted practices](https://opensource.guide/how-to-contribute/#orienting-yourself-to-a-new-project), etc. So, even if you work for years on one project and are completely up to speed on what life is like there, it’s more than likely your next project will be totally different. There are some things that are usually present, such as the [roles of people on the project](https://opensource.guide/leadership-and-governance/), including author, owner, maintainer, contributor and committer. But the fact is, it will take time, observation and interacting with project members to understand how things are done within a project – and whether or not you are a good fit. If the vibe is not right, go elsewhere. There are so many projects that could use your support.    \n\n3. If there is a code of conduct, you need to get familiar with it. Not all open source projects will have a [code of conduct](https://opensource.guide/code-of-conduct/). 
When you’re interested in a project, be sure to see if there is a code of conduct and, if so, what it says. That way, you won’t make a gaffe without realizing it (and having to hear about it from everyone else). At a high level, respect the other participants (see number 5, below). If there is no explicit code of conduct, there are [core values and norms](https://opensource.com/open-organization/21/8/leadership-cultural-social-norms) that are recognized in the open source community. Chief among these are kindness and worldwide collaboration.\n\n4. Open Source Projects often have community governance models. There are [three types of org structures](https://opensource.guide/leadership-and-governance/) generally associated with open source projects: BDFL (Benevolent Dictator for Life; [Python](/blog/beginner-guide-python-programming/)is [one example](https://artsandculture.google.com/entity/benevolent-dictator-for-life/m03m3r0l?hl=en), meritocracy (this exact term may not be used but it’s about the relative “merit” of contributions; [Apache projects](https://www.apache.org/index.html#projects-list) follow this model) or liberal contribution (under which the people who contribute the most have the most say; [Node.js](https://openjsf.org) and [Rust](/blog/rust-programming-language/)are examples). In recent years, the BDFL model has [fallen out of favor](https://readwrite.com/open-source-magento-roy-rubin-bdfl/) in some circles as it leaves the project vulnerable if a leader steps away. [As Jason Baker wrote](https://opensource.com/article/18/7/bdfl) on OpenSource.com, “How an open source project is governed can have very real consequences on the long-term sustainability of its user and developer communities alike.” Just something to keep in mind.\n\n5. When in doubt, ask away, there are no dumb questions. As with any group you might belong to, you and the other members will be happier if the tone is welcoming and kind. 
Essentially, you’re there to collaborate so respect is important. Open source participants tend to be diverse in every possible way, stay open and considerate. Women traditionally are underrepresented in open source, [so be encouraging](https://internethealthreport.org/2019/codes-of-conduct-in-open-source-communities/). Try not to waste people’s time and provide as much context as needed in issues and conversations. Most projects will set the expectation that participants should [respect each other and be civil](https://developer.mozilla.org/en-US/docs/MDN/Contribute/Open_source_etiquette) in their interactions. \n\nThe rules are a lot like the ones you may have learned in your childhood: Observe before you jump in, share your knowledge generously, always thank people who help you, and play well with others. Don’t be tempted to add to threads just to see your name. Try to find answers to questions within the community before you ask. Read the README file. [Read the documentation](https://gomakethings.com/open-source-etiquette/). If you do ask a question or send a pull request, be patient. Don't expect an immediate reply and don’t keep posting the same question. People have different priorities and might have been caught up with work and life. Make sure you have buy-in from project implementers before you send in actual code. This shows you want to contribute and you respect the work that has gone on before you.    \n\nReady to get started? Here are some success stories from our community to inspire you:\n* Dave Barr wrote about [“Why new software engineers should contribute to GitLab”](https://davebarr.dev/why-new-software-engineers-should-contribute-to-gitlab/)\n\n* [You’re hired! 
Two GitLab contributors turn their success into full-time engineering roles](/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles/)",[682,9,267],{"slug":2905,"featured":6,"template":686},"first-time-open-source-contributor-5-things-to-get-you-started","content:en-us:blog:first-time-open-source-contributor-5-things-to-get-you-started.yml","First Time Open Source Contributor 5 Things To Get You Started","en-us/blog/first-time-open-source-contributor-5-things-to-get-you-started.yml","en-us/blog/first-time-open-source-contributor-5-things-to-get-you-started",{"_path":2911,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2912,"content":2917,"config":2924,"_id":2926,"_type":14,"title":2927,"_source":16,"_file":2928,"_stem":2929,"_extension":19},"/en-us/blog/five-devops-platform-benefits-that-inspire-gitlab-users",{"title":2913,"description":2914,"ogTitle":2913,"ogDescription":2914,"noIndex":6,"ogImage":2055,"ogUrl":2915,"ogSiteName":670,"ogType":671,"canonicalUrls":2915,"schema":2916},"Five benefits that inspire users to become GitLab advocates","Learn how a single tweet led to insight from our advocates on how they use GitLab to improve their work and their own lives.","https://about.gitlab.com/blog/five-devops-platform-benefits-that-inspire-gitlab-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 DevOps platform benefits that inspire GitLab users to become GitLab advocates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Omar Fernandez\"}],\n        \"datePublished\": \"2021-11-23\",\n      }",{"title":2918,"description":2914,"authors":2919,"heroImage":2055,"date":2921,"body":2922,"category":769,"tags":2923},"5 DevOps platform benefits that inspire GitLab users to become GitLab advocates",[2920],"Omar Fernandez","2021-11-23","\n\nAt GitLab, we believe that a [single DevOps platform helps 
teams](https://about.gitlab.com/handbook/product/single-application/) to collaborate better and deliver software faster and with better security. In September, GitLab’s CEO Sid Sijbrandij [asked on Twitter](https://twitter.com/sytses/status/1440799819119824898) for volunteers willing to share their stories of advocating for the adoption of GitLab. Over the following days, GitLab team members interviewed 25 GitLab advocates who offered to share their experiences. Among other things, we asked them: \n\n- How did you first encounter GitLab?\n- Why have you advocated for the adoption of GitLab?\n- How has advocating and using GitLab in your organization benefited you? \n\nOur advocate interviews validated that GitLab’s single-application [DevOps Platform](/solutions/devops-platform/) has unlocked value for GitLab users. Below are excerpts of some of these interviews to give you the opportunity to hear directly from GitLab users. In them, you'll learn about five GitLab benefits that converted these users into advocates. \n\n## 1. A single application helps focus on work that matters\n\nGitLab’s single application helps users to focus on work that matters. GitLab reduces the need to context-switch as users no longer need to jump across disparate point-solutions. Users are better able to focus, stay on task, and drive business results.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/pMWXn6NslEE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nSam Briesemeister highlights the benefits of working on one platform, being able to link the work done to a specific issue, and increasing developer productivity. By using GitLab, users save time in their life. \n\n_“What [investing in GitLab] ultimately does is, actually, we’re saving somebody’s time [in] their life. 
We’re not wasting their life.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n  \u003Ciframe src=\"https://www.youtube.com/embed/YcsT53c_Nuo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nProfessor Neil Gershenfeld speaks about how GitLab allows his labs to do what used to require five separate solutions, one each for web serving, teaching classes, access control, documentation, and security. \n\n_When asked how GitLab has made your life better, Professor Gershenfeld said: “It’s almost hard to answer because it’s like ‘why do I like air?’ It’s just sort of, most of my work ends up in GitLab. It’s just a natural part of my working day.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/nUY8RrOyGPo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nPhilipp Westphalen, one of our [GitLab Heroes](https://about.gitlab.com/community/heroes/members/), speaks about GitLab’s ease of use and how having a single tool instead of multiple separate solutions allows him to focus on getting things done. \n\n_“For me, it feels like home... It’s really easy to use... and you can focus on getting things done.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/heFWR23Z5nw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nPavle Djuric also speaks to us about the ability to focus on work and GitLab’s ease of use.\n\n_“[Working in GitLab] makes you feel very professional. You feel like you’re doing your job. You’re way more efficient as a team.”_\n\n## 2. Reducing manual tasks through automation\n\nSeveral advocates spoke about the benefits of automating tasks within GitLab to free up time for more productive activities. 
\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/HW0ByLmG8sY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nAndrew Jones speaks about using GitLab to reduce repetitive tasks. He can’t imagine going back to the old way of doing things with many manual tasks. \n\n_“It just takes care of the stuff that would normally be laborious, painful repetitive stuff and allows you to focus on your primary function. I couldn’t imagine working without GitLab. I can’t imagine going back to the old way.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/oN1cieaeLBk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nJan Mikes tells us about automation and the ability to get things done without context switching or moving across apps. This helps his productivity and efficiency. \n\n_“There’s high demand for CI engineers and since I work as an architect, this is a high-demand skill, to write pipelines, optimize performance, shorten the time from writing the code to deploy to production. And all of this I can deliver and that’s another reason why I love GitLab, because anything that comes to my mind, I figure some way how to do it with GitLab CI.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/SvQUM6DL1B4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nMarc-Aurele Brothier speaks to us about how the adoption of GitLab helped his team be more efficient and streamline collaboration. 
\n\n_“[A customer was] very happy because we could demonstrate [to] them that, with [GitLab], that they could create the release, open a PR, say I want to deploy in our environment, and just accept it, and it was done two minutes later and it was automated. So it’s not anymore like asking someone, sending emails, or sending a request to another team. Just you do it and you get it.”_\n\n## 3. Improved transparency and collaboration\n\nTransparency and Collaboration are two of [GitLab’s values](https://handbook.gitlab.com/handbook/values/), so it was great to hear how adopting GitLab helps teams operate in a more transparent and collaborative manner. \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/1rdtQ3tvDtg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nGerben Geijteman tells us that collaboration and communication are enhanced by the transparency you get in GitLab by having the solution, or code, linked to directly from the issue tracker. This benefits collaboration with other team members and with clients. \n\n_“GitLab for me unifies it all in the same place so everyone is looking at the same code in the same direction with the same quality level.”_\n\n_“In projects where we have direct customer communication, we like to also use GitLab because GitLab gives you a more direct mode of communication. You can say we fixed this issue with this particular line of code at that particular moment in time. And it takes away intermediate layers of communication... 
and it keeps everything in context.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/pWVEnIQjGbE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nSebastian Schmid talks about how, since the adoption of GitLab, different teams in his organization are able to share and reuse source code more easily.\n\n_“Before [GitLab], only the team working on the code was able to see the code... [After starting to use GitLab] they started to use source code from other teams and collaborate in code with other teams.”_ \n\n_“People could start to contribute to our product and they don’t need to have some special skills. They just could write [an] issue, could paste some screenshots and stuff like that, and some engineers could take this and improve the product.”_\n\n## 4. A welcoming community\n\nAnyone can directly contribute to our open source GitLab core and help enhance the GitLab platform. We appreciated hearing advocates talk about their experiences contributing to GitLab and how welcoming the GitLab community has been. \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n  \u003Ciframe src=\"https://www.youtube.com/embed/qy9f-7DI_5k\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nDave Barr speaks about the value of working on a platform with an open source core and how great it is to see GitLab employees interact with contributors in the same way that they would with other GitLab employees. \n\n_“How you interact with that community is really telling. The way GitLab staff does that is really embracing, welcoming, open to feedback. They provide feedback; it's just like you’re a staff member. 
The approach they take to community contributions is the same exact approach they take for a staff merge request and that’s a fantastic approach.”_ \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/LNp3ioZr5mg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nGary Bell talks to us about how welcoming, responsive, and understanding the GitLab community is with new people who want to contribute. \n\n_“Personally, I’ve just found the overall GitLab community to be very welcoming and very understanding. Just the patience people have... they’re just welcoming and willing to give the time to help. That’s been absolutely fantastic to feel that, which is something that I’ve not felt when I’ve tried to contribute to other open source projects before.”_\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/4-z3QjPzFPk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nSimilarly, Sven Patrick Meier shares with us his journey from identifying a potential feature, proposing it, and working through the process to get the contribution accepted. \n\n_“[I submitted a contribution] and the maintainers of the project commented on my feature request and said ‘great idea,’ and I provided the template as a basic example. They helped me with so many things, and, right now, I’m right before the first contribution to that awesome product that I use every day.”_ \n\n## 5. Exposure to end-to-end modern software development\n\nUsers talk about how GitLab helps introduce them to modern software development practices. They appreciated GitLab’s monthly releases packed with new features. 
\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/YMydvPCIg44\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nMarcos Ortiz praises GitLab’s ease of use and how it made it easier to onboard team members. Marcos also speaks about how, when you get used to the way of doing things on GitLab, you can internalize beneficial development practices.\n\n_“When you get used to all these practices, load code frequently, to get everything in branch inside your repo story, I believe you can be faster in development.”_ \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/fP50GWZxz48\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nIn our discussion with Andy Malkin and Michael Kalange, we hear about how they feel that GitLab is not only on the cutting edge, but also a very reliable part of their work. \n\n_“When I use [GitLab], I feel like I’m on the cutting edge. A lot of time in tech you can feel like you’re using something and you know it’s outdated, but I don’t have that feeling with GitLab. When I’m using GitLab, I feel that you really are pushing the boundaries in terms of what’s the next thing that we need.”_ \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/7gU12X10718\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nChris Evans speaks to us about how GitLab's neatly organized user interface helped him learn more about the overall DevOps processes and tools.\n\n_“[I] started off as a network engineer [and] I ended up [in] some sysadmin-related roles but I was never really exposed to the software development lifecycle... 
but just through choosing GitLab as a project management platform, I was exposed to so many of the tools of the trade for this other industry, software engineering, and I was able to almost learn those in a way without having to use them directly.”_  \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/JhfFlSBQ7tY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nSimilarly, Ion Nistor tells us about how he gets exposed to new areas and tools in DevOps by using GitLab. \n\n_“I like to learn. Many of the things that GitLab brings are related to new technologies and new ways of doing things. GitLab in this sense acted [as a] gateway to new technologies. I have to learn about DevOps more, about containers, and these are benefits for my personal development.”_ \n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube.com/embed/yuBeOxqnou4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\nDibyadarshi Dash, a past intern at GitLab, highlights how having a single, integrated product to develop software made it easier for him to learn about software development end-to-end. \n\n_“I got a good exposure to what the software development lifecycle looks like because it was all in one platform, all in GitLab. The writing, coding, merging, deploying, testing, everything was in one platform. And I feel that because it was all in one integrated platform, I got a good holistic exposure to the whole cycle and I understood the cycle even better.”_ \n\n## Bringing it all together\n\nThe GitLab advocates reinforced our belief in GitLab’s value as a single DevOps platform. The advocates talked about being able to focus on getting work done, using automation to reduce manual steps, and transparently collaborating with their colleagues. 
They also highlighted how GitLab helps them get exposed to and learn about the modern software development process end-to-end thanks to our fast innovation and how our welcoming open source community has made it possible for them to contribute features to GitLab.\n",[9,1515,267],{"slug":2925,"featured":6,"template":686},"five-devops-platform-benefits-that-inspire-gitlab-users","content:en-us:blog:five-devops-platform-benefits-that-inspire-gitlab-users.yml","Five Devops Platform Benefits That Inspire Gitlab Users","en-us/blog/five-devops-platform-benefits-that-inspire-gitlab-users.yml","en-us/blog/five-devops-platform-benefits-that-inspire-gitlab-users",{"_path":2931,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2932,"content":2938,"config":2943,"_id":2945,"_type":14,"title":2946,"_source":16,"_file":2947,"_stem":2948,"_extension":19},"/en-us/blog/five-essential-business-benefits-a-devops-platform-gives-smbs",{"title":2933,"description":2934,"ogTitle":2933,"ogDescription":2934,"noIndex":6,"ogImage":2935,"ogUrl":2936,"ogSiteName":670,"ogType":671,"canonicalUrls":2936,"schema":2937},"Five essential business benefits a DevOps platform gives SMBs","Multiply your SMB’s tech muscle, reduce expenses, and cut wasted time.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668332/Blog/Hero%20Images/architecture-building-business-258163.jpg","https://about.gitlab.com/blog/five-essential-business-benefits-a-devops-platform-gives-smbs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Five essential business benefits a DevOps platform gives SMBs\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-08-30\",\n      }",{"title":2933,"description":2934,"authors":2939,"heroImage":2935,"date":2940,"body":2941,"category":769,"tags":2942},[810],"2022-08-30","\nSmall and medium-sized businesses (SMBs) face a litany of 
potentially crippling obstacles, but there’s a single step executives can take that will create multiple business benefits. \n\n[Migrating to an end-to-end DevOps platform for SMBs](https://page.gitlab.com/migrate-to-devops-guide.html) will not only greatly improve an SMB’s odds of survival, but it will increase their chance of actually thriving in an environment that sees half of all small businesses failing within their first five years. That’s right. All businesses face competition and obstacles, but SMBs and small and medium-sized enterprises (SMEs), in particular, are looking at an uphill battle so steep that 20% of U.S. small businesses fail within just the first year, [according to the U.S. Bureau of Labor Statistics](https://www.bls.gov/bdm/entrepreneurship/entrepreneurship.htm). So why not grab onto any advantage available, especially one this beneficial?\n\nHere’s how [a full DevOps platform can help any SMB](/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform/):\n\n## Multiply tech muscle\n\nLarge enterprises might have an IT department, or even a separate DevOps group, made up of dozens or hundreds of people. That’s not the case with SMBs and SMEs. A small business might just have one IT person. That leaves one – or two or five – people shouldering a whole lot of work. They’re left not only to handle issues with cybersecurity, email, and buggy laptops, but they also have to design, develop, and deploy new software and iterations. With a DevOps platform, a lot of repetitive tasks are automated, and security testing is built in from the get-go, freeing up a lot of time. With a DevOps platform, it’s possible to do more with fewer hands. \n\n## Engage the entire team\n\n[Fostering collaboration](/blog/5-ways-collaboration-boosts-productivity-and-your-career/) is a big part of a DevOps platform and it’s of particular benefit to SMBs. Yes, there are fewer employees in a smaller organization. 
That, though, doesn’t have to be a disadvantage. A DevOps platform fosters a collaborative environment, [breaking down departmental silos](/blog/developing-a-successful-devops-strategy/) and enabling everyone – from the head of the business to people in sales, marketing, and customer service – to work together on software planning and design. That means a wider swath of employees can pitch in on projects, naturally bringing more input and help to the table. And that makes software more inclusive and well-rounded. It also makes employees more engaged. \n\n## Stop wasting time and effort on a toolchain\n\nSMBs, like their larger enterprise brethren, turn to DevOps to more efficiently and quickly develop and deploy software. But when they don’t go with a single, end-to-end DevOps platform, they end up creating a complicated tangle of tools, or a toolchain. And these toolchains force them to not only learn, but continually switch back and forth between multiple interfaces, passwords, and ways of working. Even worse, those [taxing toolchains](/topics/devops/use-devops-platform-to-avoid-devops-tax/) only grow in size and unwieldiness as the business grows. With fewer IT people onboard, the full burden of these toolchains falls solely on a limited number of people – or even worse, it might fall on just one person. Get rid of that chaotic environment, and the waste of time and effort it brings, by migrating to a single application. \n\n\n## Eliminate the expense of a toolchain\n\nSince most SMBs have limited budgets, many often turn to DevOps tools that have what initially appear to be smaller price tags. However, by casting around for what might seem like a bargain, it creates an even greater mishmash of tools, which the company continually has to pay for. 
A [2020 Forrester Consulting Total Economic Impact Study](https://learn.gitlab.com/c/forrester-tei?x=X4W83-) noted that moving to a single DevOps application improves development and delivery efficiency by more than 87%, cuts down on licensing costs, and increases savings. The [expenses that come along](/webcast/simplify-to-accelerate/) with multiple licenses and continual maintenance are diminished with a single, end-to-end DevOps platform, driving the bottom line and delivering business value. \n\n## Improved security benefits for your business\n\nSMBs have the perfect chance to build security into their code and processes from the very beginning. That’s a much better process than [making security an afterthought](/blog/toolchain-security-with-gitlab/), or completely pushing security aside when projects are bumping up against tight deadlines. That won’t happen with a single DevOps platform, which integrates security into the entire software delivery lifecycle – from planning through design, build, and monitoring. Every single step of the development process. A DevOps platform even automates security testing, ensuring it’s not forgotten and relieving IT professionals from some repetitive, hands-on tasks. When [security is shifted left](/blog/efficient-devsecops-nine-tips-shift-left/) this way, if a vulnerability or compliance issue is introduced into the code, it’s identified almost immediately. And improved security doesn’t just benefit your software. It also benefits your customers, your brand reputation, and your overall business.\n\nMost every business, [regardless of size](/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform/), is creating software to serve customers, connect with partners and suppliers, and find new revenue streams. But muddling together a string of tools that end up costing time, effort, and money just to maintain and use them isn’t the answer. 
If SMBs toss that complicated toolchain aside and replace it with one platform, they’ll expand their IT capabilities, reduce costs, and be better able to take on competitors with more experience and deeper pockets.\n",[9,231,793],{"slug":2944,"featured":6,"template":686},"five-essential-business-benefits-a-devops-platform-gives-smbs","content:en-us:blog:five-essential-business-benefits-a-devops-platform-gives-smbs.yml","Five Essential Business Benefits A Devops Platform Gives Smbs","en-us/blog/five-essential-business-benefits-a-devops-platform-gives-smbs.yml","en-us/blog/five-essential-business-benefits-a-devops-platform-gives-smbs",{"_path":2950,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2951,"content":2956,"config":2962,"_id":2964,"_type":14,"title":2965,"_source":16,"_file":2966,"_stem":2967,"_extension":19},"/en-us/blog/forrester-cdra2020",{"title":2952,"description":2953,"ogTitle":2952,"ogDescription":2953,"noIndex":6,"ogImage":1861,"ogUrl":2954,"ogSiteName":670,"ogType":671,"canonicalUrls":2954,"schema":2955},"GitLab and The Forrester Wave: CD and release automation","GitLab named a Strong Performer in Forrester Wave for Continuous Delivery and Release Automation","https://about.gitlab.com/blog/forrester-cdra2020","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and The Forrester Wave: Continuous Delivery and Release Automation Q2 2020\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Parker Ennis\"}],\n        \"datePublished\": \"2020-07-08\",\n      }",{"title":2957,"description":2953,"authors":2958,"heroImage":1861,"date":2552,"body":2960,"category":726,"tags":2961},"GitLab and The Forrester Wave: Continuous Delivery and Release Automation Q2 2020",[2959],"Parker Ennis","\nHere at [GitLab](/company/), we're fundamentally changing the way that organizations develop and deploy their software by offering a complete [DevOps 
platform](/solutions/devops-platform/) delivered as a single application. Excitingly enough, GitLab was recently cited as a **Strong Performer** in the Forrester Continuous Delivery and Release Automation (CDRA) report for Q2 2020. For this CDRA wave, Forrester evaluated a wide range of vendors to see how their CDRA capabilities stack up in relation to each other and the market at large. Forrester’s evaluation specifically ranks the strengths and weaknesses based on the capabilities of each vendor’s current offering(s), their product vision/strategy going forward, and their market presence to provide an in-depth analysis that companies can use to make the right decisions when it comes to choosing the best CDRA solution for them.\n\nFourteen vendors were interviewed, researched, and analyzed for this report against 26 types of criteria. \n\nFor those interested, you can access this report directly from Forrester on our [commentary page](/analysts/forrester-cdra20/).\n\n#### **A Little Background**\n\n[Continuous delivery](/topics/continuous-delivery/) and release automation is an area that’s quickly evolving to meet the needs of the market. This puts an immense amount of pressure on vendors to innovate just as rapidly in order to not only compete effectively, but to provide customers with the best possible solution and experience. Fortunately, we’re excited to be moving in the right direction and continuously improving our CDRA capabilities at GitLab. 
Since 2018, we've made significant investments to add new functionality, improve existing capabilities, and bring both our [continuous delivery direction](/direction/release/continuous_delivery/) and [release orchestration](/direction/release/release_orchestration/) visions to life.\n\n#### **Why is CDRA important?**\n\nAs technology and software development continues to advance at this feverish pace, all businesses, not just vendors, are feeling the pressure more than ever to modernize how they build, test, and deploy their applications. You've probably heard \"every company is a software company\" before and that's exactly what it means.\n\nPrioritizing automation over manual development work is at the center of these transformation efforts. Why's this important? Because how a vendor fares in the realm of CDRA has a direct correlation to the strength and maturity of their [CI/CD capabilities](/topics/ci-cd/). As a result, reports like this CDRA wave act as one of many solid indicators for a vendor's ability to help businesses achieve their goals and automate their software development processes. Naturally, CDRA focuses heavily on the release/deployment automation portion of the software delivery lifecycle, but you can't automate deployments without having a solid CI implementation to automate builds/tests first. 
Taking that into account, an evaluation like this can go a long way in determining whether a given vendor's solution is not only right for your business today, but where you want your business to be in the future.\n\n#### **Key takeaways in the publication**\n\nHere’s what Forrester determined to be the key takeaways for this Wave:\n\n**CloudBees, IBM, Microsoft, Digital.ai, Broadcom, And Flexagon Lead The Pack**\n\n**Visualizations Of Complex Application And Deployment Models Are Key Differentiators**\n\nAs the continuous delivery market continues to consolidate, and with upstream continuous integration capabilities and higher-order management becoming the norm, vendors are competing increasingly on breadth of functionality. The ability to visualize complex application and deployment models continues to be a differentiator, as does the management of deployment outcomes. Appropriate use of advanced analytics and machine learning is also a key factor, with continuing vendor investment resulting in valuable capabilities such as improved release readiness.\n\n#### **Our highlights**\n\n**GitLab is among vendors with highest score** for these categories:\n\n*   Build automation/continuous integration\n*   Deployment and operations\n*   CDRA vision and value proposition\n*   Product innovation\n*   Market approach and viability\n\n**Forrester’s profile of GitLab:**\n\n**“GitLab is expanding its comprehensive platform quickly.** \n\n“GitLab emerged from the continuous integration side of the market and, with its foundation in source control, has strong headwaters capabilities. GitLab supports continuous integration and deployment to cloud-native platforms, but support for legacy platforms is lacking. More recently, the company has added continuous delivery features, including continuous integration and deployment for Kubernetes. The product bases its application modeling on Helm charts, thus requiring Kubernetes to function. 
The firm grounds its strategy in a very active open source community and a clear ability to execute on this business model. GitLab distinguishes itself as one of the fastest-innovating vendors in this evaluation.\"\n\n#### **Looking Ahead**\n\nWe’re dedicated to becoming a standard of excellence in CDRA and are working diligently towards our promise of [Progressive Delivery](/direction/ops/#progressive-delivery) becoming a market-leading solution, as well as many other important roadmap targets such as making [release management easier](/direction/release/release_orchestration/) for our users -- with much more in store. For additional details around exactly what we have planned on the roadmap and our overall vision going forward, check out this [Release vision](https://youtu.be/pzGCishRoh4) video overview from our product team, and learn more about [GitLab's continuous delivery](/stages-devops-lifecycle/continuous-delivery/) functionality.\n\nGitLab already excels in numerous parts of the DevOps lifecycle such as SCM, code review, CI, and cloud native development to name a few of our most [mature](/direction/maturity/) functionality areas. Other vendors find themselves in many reports with many different products, but GitLab is the only vendor who has been listed in multiple reports with the same product, reinforcing the strength of GitLab's message of using a single application for the entire DevOps lifecycle.\n\nWe will continue to improve GitLab and provide the best possible solution for organizations to deliver better software faster. 
Until next time!\n\n_If you have any questions or would like to contact us about this report, you can reach us [here](/company/contact/)._\n",[977,9,726],{"slug":2963,"featured":6,"template":686},"forrester-cdra2020","content:en-us:blog:forrester-cdra2020.yml","Forrester Cdra2020","en-us/blog/forrester-cdra2020.yml","en-us/blog/forrester-cdra2020",{"_path":2969,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2970,"content":2975,"config":2982,"_id":2984,"_type":14,"title":2985,"_source":16,"_file":2986,"_stem":2987,"_extension":19},"/en-us/blog/forrester-tei",{"title":2971,"description":2972,"ogTitle":2971,"ogDescription":2972,"noIndex":6,"ogImage":1861,"ogUrl":2973,"ogSiteName":670,"ogType":671,"canonicalUrls":2973,"schema":2974},"Estimate your GitLab ROI with Forrester's economic study","Now available: A new Forrester ROI study and calculator based on real value customers got from using GitLab for SCM, CI, and CD.","https://about.gitlab.com/blog/forrester-tei","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Discover your GitLab return on investment with the Forrester Total Economic Impact™ Study and Estimator\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Colin Fletcher\"}],\n        \"datePublished\": \"2020-07-29\",\n      }",{"title":2976,"description":2972,"authors":2977,"heroImage":1861,"date":2665,"body":2979,"category":726,"tags":2980},"Discover your GitLab return on investment with the Forrester Total Economic Impact™ Study and Estimator",[2978],"Colin Fletcher","\n\nWe consistently hear from the global GitLab family (our community, customers, and really anybody interested in GitLab) that they know from experience that GitLab helps them do the work they want to do, faster and better, and that it’s a valuable, even vital, part of their success. 
But they often have a difficult time describing the value GitLab delivers, especially in specific, quantified ways. We also regularly hear that the hardest part about quantifying \"value\" is knowing where and how to start. \n\n**Enter the Forrester Total Economic Impact™ (TEI) of GitLab: studying real customer experiences**\n \nSo to help everyone better understand the value proposition, GitLab commissioned Forrester Consulting to conduct a [Total Economic Impact™ (TEI) study](/resources/report-forrester-tei/) examining the potential return on investment (ROI) organizations may realize by using GitLab for version control & collaboration (VC&C)/SCM, [continuous integration (CI), and continuous delivery (CD)](/topics/ci-cd/) - all use cases that represent where many teams begin or expand their use of GitLab.  \n\nTo start, GitLab customers were independently interviewed by Forrester Consulting. The interview experiences and any other data collected was then used to create multiple models which in turn generated quantified results based on the combined experiences of all of the customers studied. The data collected, resulting models, and study itself were then reviewed independently by Forrester Research analysts. GitLab stakeholders were also interviewed as part of the data gathering and review process.  \n\n**Significant results and useful tools to discover your ROI**\n\nJust a sampling of the results realized by the composite organization over an analysis period of three years, based on GitLab customer experiences, yielded these potential, quantifiable benefits in the form of:  \n\n- An overall 407% return on investment (ROI) \n- Improved development and delivery efficiency \n  - Ex. 87% improved development and delivery efficiency (reduced time), resulting in over $23 million in savings \n- Revenue from increased number of releases \n  - Ex. 
12x increase in the number of revenue generating application releases in a year, resulting in $12.3 million in additional revenue \n- Improved Code Quality \n  - Ex. 80% reduction in code defects, resulting in over $16.8 million in savings \n- Savings from reducing the number of tools in use \n  - Ex. $3.7 million in savings from using four fewer tools (with their associated costs) each year  \n\nNow these results, while impressive, are based on the experiences of the GitLab customers studied and as with all models, your own unique experience will vary. As such we encourage you to spend time looking over [the study](/resources/report-forrester-tei/) to better understand where the numbers came from and how they may or may not relate to your situation and what you are working to achieve.  \n\nTo help you take the next step of estimating your own potential results, we are thrilled to make available an [online estimator](https://tools.totaleconomicimpact.com/go/gitlab/devopsplatform/index.html) that is based on the TEI study’s models. Enter your own data and you'll get a customized version of the study.  \n\n**Couldn’t have done it without you**\n\nLastly, we want to offer our deepest thanks to the incredibly generous GitLab customers who were willing to share their experiences in this way. They helped all of us in our respective journeys. Thank you! 
\n\n**Get started today!** \n\n- [Download the Forrester Total Economic Impact™ Study commissioned By GitLab, June 2020](/resources/report-forrester-tei/)\n- \u003Ca href=\"https://tools.totaleconomicimpact.com/go/gitlab/devopsplatform/index.html\" target=\"_blank\">Fill out your info in the online estimator and get a custom report based on the TEI study data and models\u003C/a>\n",[109,749,9,726,2981,1829],"research",{"slug":2983,"featured":6,"template":686},"forrester-tei","content:en-us:blog:forrester-tei.yml","Forrester Tei","en-us/blog/forrester-tei.yml","en-us/blog/forrester-tei",{"_path":2989,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2990,"content":2996,"config":3002,"_id":3004,"_type":14,"title":3005,"_source":16,"_file":3006,"_stem":3007,"_extension":19},"/en-us/blog/four-approaches-to-gitlab-integrations",{"title":2991,"description":2992,"ogTitle":2991,"ogDescription":2992,"noIndex":6,"ogImage":2993,"ogUrl":2994,"ogSiteName":670,"ogType":671,"canonicalUrls":2994,"schema":2995},"4 approaches to GitLab integrations","Learn about use cases that help extract even more value from a DevSecOps platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667946/Blog/Hero%20Images/4-facets-of-gitlab-integration.png","https://about.gitlab.com/blog/four-approaches-to-gitlab-integrations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 approaches to GitLab integrations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kurt Dusek\"}],\n        \"datePublished\": \"2023-01-26\",\n      }",{"title":2991,"description":2992,"authors":2997,"heroImage":2993,"date":2999,"body":3000,"category":769,"tags":3001},[2998],"Kurt Dusek","2023-01-26","\n\nThe benefit of a DevSecOps platform is to create a foundation upon which an organization can build its entire development process. 
Rather than having to log onto several different systems to manage, observe, and advance through the software development lifecycle, DevSecOps teams have one application to serve as their system of record. To augment the platform and create even more business value, organizations can create integrations with third-party software and systems, while still maintaining a unified experience for stakeholders, developers, and operators.\n\nLet's look at what integrations are possible and the use cases that drive them.\n\n## What can be integrated with GitLab\n\nAs a senior solutions architect for Alliances here at GitLab, I often get asked, \"How can I integrate GitLab with X?\" My response: That depends on what's being integrated. X could be a cloud provider, point tool, legacy application or web service that might be used in the development cycle. \n\n## How to integrate with GitLab\n\nThere are four approaches to GitLab integrations:\n\n1. Use GitLab to deploy client applications to X / Host GitLab runners on X\n2. Host GitLab Server on X\n3. Integrate with the development cycle\n4. Deep GitLab application integration\n\nLet's dig deeper into each one.\n\n### 1. Use GitLab to deploy client applications to `X` or Host GitLab runners on `X`\nA very common use case and typically the easiest to achieve. For instance, platform providers, who want to make it easy for their users to run apps built with GitLab on their infrastructure or application server, are often asked for this option. The path is to have GitLab Server be able to authenticate to the hosting platform, and deploy the (ideally containerized) application to the platform.\n\nA close cousin of this is the need to deploy [GitLab runners](https://docs.gitlab.com/runner/) to the infrastructure and register them with a GitLab instance, be it GitLab.com or a self-managed instance. 
Runners are easy to set up and register, and can be [configured and scaled in many different ways](https://docs.gitlab.com/runner/fleet_scaling/). \n\n### 2. Host GitLab Server on `X`\nPlatform providers are also asked to host GitLab Server on their infrastructure. What makes this easy is GitLab runs almost anywhere; if you've got Linux, you can run GitLab Server (even on a Raspberry Pi). The work has already been done for the major cloud providers, including [GCP](https://docs.gitlab.com/ee/install/google_cloud_platform/), [AWS](https://docs.gitlab.com/ee/install/aws/), [Azure](https://docs.gitlab.com/ee/install/azure/), and [Oracle Cloud](https://docs.oracle.com/en/solutions/deploy-gitlab-ci-cd-oci/index.html). If you want to run on your own infrastructure, the [Omnibus](https://docs.gitlab.com/omnibus/) installer does most of the heavy lifting for you; it's the easiest way to self-host GitLab.  \n\n### 3. Integrate with the development cycle\nHere's where it starts to get a bit more involved. The good news is that GitLab has extensive [APIs](https://docs.gitlab.com/ee/api/) and [webhooks](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html) that allow for listening for events and pushing and pulling data.\n\nIf the goal is to integrate with the [CI/CD pipeline](https://docs.gitlab.com/ee/ci/index.html), this can be done by creating a container image that encapsulates the application or scripts necessary and defining a job within the pipeline that uses this image to run the integration. It's likely the integrated app produces some output that **someone** needs to review. Displaying this output directly within the Merge Request elevates third-party data rather than something that has to be searched for in another system.  
Depending on the nature of the tool being integrated, it's possible to show results and a [security report](https://docs.gitlab.com/ee/development/integrations/secure.html#report), [metrics report](https://docs.gitlab.com/ee/ci/testing/metrics_reports.html), or [artifact](https://docs.gitlab.com/ee/ci/pipelines/job_artifacts.html#expose-job-artifacts-in-the-merge-request-ui) that can contain almost any type of data.\n\n### 4. Deep GitLab application integration\nThis is the most complex since it requires an understanding of the [architecture of the GitLab application](https://docs.gitlab.com/ee/development/architecture.html#simplified-component-overview), and how an outside service will interact with and support this architecture. An example of this would be a managed PostgreSQL or Redis service. There's a potential risk of downtime if this type of integration goes wrong, so it's important to test thoroughly in a production-like environment before considering it production-ready. Fortunately GitLab publishes several tools to do this. [GitLab Performance Tool (GPT)](/handbook/support/workflows/gpt_quick_start.html) provides an excellent way to measure and report on the performance of a GitLab instance under various usage scenarios. Its counterpart [GitLab Browser Performance Tool](https://gitlab.com/gitlab-org/quality/performance-sitespeed) tests the browser performance of various GitLab pages.  
\n\nRead more on [Kurt Dusek's blog](https://blog.scientifik.org/).\n",[231,282,9],{"slug":3003,"featured":6,"template":686},"four-approaches-to-gitlab-integrations","content:en-us:blog:four-approaches-to-gitlab-integrations.yml","Four Approaches To Gitlab Integrations","en-us/blog/four-approaches-to-gitlab-integrations.yml","en-us/blog/four-approaches-to-gitlab-integrations",{"_path":3009,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3010,"content":3016,"config":3021,"_id":3023,"_type":14,"title":3024,"_source":16,"_file":3025,"_stem":3026,"_extension":19},"/en-us/blog/four-tips-to-increase-your-devops-salary",{"title":3011,"description":3012,"ogTitle":3011,"ogDescription":3012,"noIndex":6,"ogImage":3013,"ogUrl":3014,"ogSiteName":670,"ogType":671,"canonicalUrls":3014,"schema":3015},"Four tips to increase your DevOps salary","You have a great career with a solid salary, but can you do better? (Hint: of course.) Here's how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668124/Blog/Hero%20Images/moneyfarm_background.jpg","https://about.gitlab.com/blog/four-tips-to-increase-your-devops-salary","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Four tips to increase your DevOps salary\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2021-10-20\",\n      }",{"title":3011,"description":3012,"authors":3017,"heroImage":3013,"date":3018,"body":3019,"category":769,"tags":3020},[810],"2021-10-20","\n\n_This is the second in an occasional series looking at DevOps salaries and careers. Find out [how your DevOps salary stacks up](/blog/a-look-at-devops-salaries/)._\n\nSalaries for DevOps professionals are strong, despite a pandemic and a global economic crisis. 
However, you can still command an even higher salary with four straightforward strategies.\n\nVarious surveys have shown the IT industry is thriving right now and DevOps professionals, in particular, are [increasing in demand and value](https://about.gitlab.com/blog/a-look-at-devops-salaries/). DevOps repeatedly ranks well on some reputable lists such as Robert Half’s [15 highest paying IT jobs](https://www.roberthalf.com/blog/salaries-and-skills/the-13-highest-paying-it-jobs-in-2019) and Glassdoor’s 2021 list of [Best Jobs in America](https://www.glassdoor.com/research/best-jobs-in-america-for-2021/). \n\nIn [an August jobs report](https://www.prnewswire.com/news-releases/nationwide-tech-hiring-surges-in-second-quarter-per-dice-q2-tech-job-report-301351520.html), Dice CEO Art Zeile called this “one of the hottest tech job markets since the dot-com era,” and pointed to the upward trend in tech job postings since November 2020.\n\n## How to increase your salary\n\nBy following these strategies, DevOps professionals can take advantage of this strong market to boost their paychecks.\n\n### 1. Gain more experience\n\nExperience level is a big driver when it comes to how much money DevOps professionals will be taking home. [The Randstad 2021 Salary Guide](https://rlc.randstadusa.com/for-business/learning-center/salary-insights/salary-guide/IT-technologies) shows a more than $27,000 difference between the annual salary of a DevOps developer with one year of experience ($112,785) and someone with five years of experience ($140,242). An additional 10 years of experience can garner another $25,000 bump, according to the Randstad Salary Guide.\n\nExperience doesn’t have to happen sequentially, however. In our [2021 Global DevSecOps Survey](/developer-survey/) we found more than  69% of respondents participate in “sideline” open source projects. Those extracurricular efforts can look great on a resume and also are a way to showcase niche skills.\n\n### 2. 
Expand your education\n\nEmployers also are looking for DevOps professionals to continue to increase their skill set, such as learning new coding languages and scripting skills, according to Glassdoor and Robert Half.\nDevOps professionals also should stay up-to-date on new frameworks, automation, data management and security systems. And don’t forget the importance of analytics skills, configuration management and DevOps platforms. As we all know, technology is a moving target and being able to not only use the latest technology but also explain its importance to executives and other business leaders will make you a more valuable employee.\n\n### 3. Pursue certifications\n\nWant to show your employer - or a future employer - that you have the skills to work on a business-critical DevOps platform? The proof is sometimes in the certification. Think about getting certified in [Kubernetes](https://training.linuxfoundation.org/certification/certified-kubernetes-application-developer-ckad/), [Docker](https://prod.examity.com/docker/), Puppet or [Ansible](https://www.redhat.com/en/services/training/ex407-red-hat-certified-specialist-in-ansible-automation-exam?section=Overview). And of course there’s an option to become a [GitLab Certified Associate](https://about.gitlab.com/services/education/gitlab-certified-associate/). Certifications help an employer understand your functional knowledge of their business systems.\n\n### 4. Improve your soft skills \n\nYes, it’s critical that you know how to make the technology work and how to keep projects running on time and on budget, but you also should concentrate on “soft skills,” like communication, collaboration and leadership, if you’re aiming to qualify for a better salary. In 2020 our survey takers all agreed that soft skills were the most important thing for their future careers, and they remained the second choice of most survey takers this year as well. 
\n\nCompanies need professionals who understand the business’ needs, can communicate how a DevOps platform can solve key challenges and can explain the competitive advantage gained from a strong DevOps strategy. Soft skills enable professionals to operate as a team, endure stressful moments and work through difficult problems.\n\nDevOps professionals are in demand, putting you in a strong earning position. So make sure you are doing all you can to show you deserve a higher salary.\n\n## Read more on DevOps careers: \t\t\n\n- [Best advice for your DevOps career? Keep on learning](/blog/best-advice-for-your-devops-career-keep-on-learning/)\n\n- [6 tips to make software developer hiring easier](/blog/6-tips-to-make-software-developer-hiring-easier/)\n\n- [DevOps salaries in 2021: Where do you rank?](/blog/a-look-at-devops-salaries/)\n\n- [Have DevOps jobs to fill? Try these 3 strategies to hire and retain](/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain/)\n\n",[813,9,2535],{"slug":3022,"featured":6,"template":686},"four-tips-to-increase-your-devops-salary","content:en-us:blog:four-tips-to-increase-your-devops-salary.yml","Four Tips To Increase Your Devops Salary","en-us/blog/four-tips-to-increase-your-devops-salary.yml","en-us/blog/four-tips-to-increase-your-devops-salary",{"_path":3028,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3029,"content":3034,"config":3039,"_id":3041,"_type":14,"title":3042,"_source":16,"_file":3043,"_stem":3044,"_extension":19},"/en-us/blog/free-period-for-cicd-external-repositories",{"title":3030,"description":3031,"ogTitle":3030,"ogDescription":3031,"noIndex":6,"ogImage":1861,"ogUrl":3032,"ogSiteName":670,"ogType":671,"canonicalUrls":3032,"schema":3033},"The free period of CI/CD for GitHub is ending soon","The free-of-charge use of CI/CD for GitHub is ending soon, so you'll need to upgrade to continue using this feature.","https://about.gitlab.com/blog/free-period-for-cicd-external-repositories","\n          
              {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The free period of CI/CD for GitHub is ending soon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Parker Ennis\"}],\n        \"datePublished\": \"2020-03-12\",\n      }",{"title":3030,"description":3031,"authors":3035,"heroImage":1861,"date":3036,"body":3037,"category":726,"tags":3038},[2959],"2020-03-12","\n\n[CI/CD for GitHub](/solutions/github/) is a feature that lets you use any Git-based repository as a host in combination with GitLab CI/CD regardless of where your source code lives – [GitHub](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/github_integration.html), [Bitbucket](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/bitbucket_integration.html), or any other Git server. To introduce this feature to the large number of users with private repos hosted on GitHub.com, we made it available to users [free of charge](/blog/six-more-months-ci-cd-github/) for a limited time only.\n\nWe then [extended the free period](/blog/ci-cd-github-extended-again/) for an additional limited time. We’ve set the final end date for this free period for March 22, 2020.\n\nIf you wish to continue using [CI/CD for private external repositories](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/index.html) past March 22, 2020, you will need to upgrade your plan to at least a [Silver plan](/pricing/).\n\nOf course, you always have the option of migrating your project to [GitLab.com](https://docs.gitlab.com/ee/user/project/import/github.html). As part of our commitment to our value of transparency and open source, all public repositories on GitLab.com get all of the features in our top-tier Gold plan for free. 
If your repo on GitHub.com is public, then it gets mirrored to GitLab.com as a public repo and you have access to CI/CD capabilities.\n\nNote: If you are only using [repository mirroring](https://docs.gitlab.com/ee/user/project/repository/repository_mirroring.html#pulling-from-a-remote-repository) without CI/CD then you only need a Bronze plan to continue using this functionality.\n\nSo, what exactly does this mean for you?\n\n*  The ability to mirror private external repositories and run CI/CD on them will no longer be available as of March 22, 2020, unless the repositories have been made public or you have upgraded to an eligible GitLab plan.\n*  Since [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) can only be published through GitLab CI, users who were using a GitHub repository with private projects and haven't upgraded to an eligible GitLab plan will be unable to have private pages.\n\nWe've designed this process to be a smooth transition for our users. If you have any additional questions about the change, or how this impacts you and your teams, please don’t hesitate to reach out:\n*  For general questions or pricing inquiries, please contact our **[Sales team](/sales/)**.\n*  For technical questions or concerns, please review our **[Support options](/support/)**.\n\nThanks!\n",[109,9,267,682],{"slug":3040,"featured":6,"template":686},"free-period-for-cicd-external-repositories","content:en-us:blog:free-period-for-cicd-external-repositories.yml","Free Period For Cicd External 
Repositories","en-us/blog/free-period-for-cicd-external-repositories.yml","en-us/blog/free-period-for-cicd-external-repositories",{"_path":3046,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3047,"content":3053,"config":3058,"_id":3060,"_type":14,"title":3061,"_source":16,"_file":3062,"_stem":3063,"_extension":19},"/en-us/blog/free-trial-gitlab-gold",{"title":3048,"description":3049,"ogTitle":3048,"ogDescription":3049,"noIndex":6,"ogImage":3050,"ogUrl":3051,"ogSiteName":670,"ogType":671,"canonicalUrls":3051,"schema":3052},"Take GitLab Gold out for a test drive","Free trials of GitLab Gold are now available – here's how to get started.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670123/Blog/Hero%20Images/moving-to-gitlab-cover.png","https://about.gitlab.com/blog/free-trial-gitlab-gold","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Take GitLab Gold out for a test drive\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2018-09-19\",\n      }",{"title":3048,"description":3049,"authors":3054,"heroImage":3050,"date":3055,"body":3056,"category":299,"tags":3057},[745],"2018-09-19","\n\nIf you’re like me, you probably \"try\" new software and different products all\nthe time. I’ve lost track of how many trials I’ve started, but I know that the\ntrial helps me make a better decision about what to do next.\n\nThe same is true with GitLab; you really should try it out to get a sense about\nhow much power and value is at your fingertips. What’s the difference between\nusing the open source version of GitLab and the enterprise features of GitLab.com Gold / GitLab Ultimate? 
Night and day.\n\nHere are a few things that you should explore in a GitLab trial:\n* Security ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/), [DAST](https://docs.gitlab.com/ee/user/application_security/dast/), and [dependency scans](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/))\n* [Portfolio management](/solutions/agile-delivery/) and tracking epics and roadmaps\n* [Licence management](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html)\n* [Kubernetes](/solutions/kubernetes/) integration and management\n* [LDAP](https://docs.gitlab.com/ee/administration/auth/ldap/index.html) integration\n* [Merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/) with multiple reviewers\n\nUntil now, if you wanted to explore these great GitLab features, your only option\nwas to [download GitLab Ultimate](/free-trial/), install it, spin up your self-managed instance,\nand start your trial.\n\nWe’ve heard from customers that they wanted an easier way to try the enterprise\nfeatures of GitLab without the extra work to download, install, and configure.\nNow, you can skip straight to trying GitLab with our new **GitLab Gold Trial**.\n**GitLab.com Gold** is our hosted version of Gitlab Ultimate, where you can quickly\nexplore and test the end-to-end [DevOps lifecycle](/topics/devops/) features that make GitLab a one-stop\nsolution for your entire delivery process.\n\n## Get started\n\nAre you ready to explore **GitLab Gold**? You can get started in three simple steps:\n1.  [Register  on GitLab.com](https://gitlab.com/users/sign_in#register-pane) **(If you already have an account, please skip this step)**\n2.  Sign into GitLab.com\n3.  
[Click here to start your free trial of GitLab Gold](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=blog-2018-09-19b)\n![GitLab Gold Trial](https://about.gitlab.com/images/blogimages/GitLab_Gold_Trial.png){: .shadow.center.medium}\n\nPlease make sure you're signed into GitLab.com so you can go to the trial signup page.\n\n![GitLab Gold](https://about.gitlab.com/images/blogimages/GitLabGold.png){: .shadow.center.medium}\n",[726,9],{"slug":3059,"featured":6,"template":686},"free-trial-gitlab-gold","content:en-us:blog:free-trial-gitlab-gold.yml","Free Trial Gitlab Gold","en-us/blog/free-trial-gitlab-gold.yml","en-us/blog/free-trial-gitlab-gold",{"_path":3065,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3066,"content":3071,"config":3076,"_id":3078,"_type":14,"title":3079,"_source":16,"_file":3080,"_stem":3081,"_extension":19},"/en-us/blog/from-dev-to-devops",{"title":3067,"description":3068,"ogTitle":3067,"ogDescription":3068,"noIndex":6,"ogImage":1527,"ogUrl":3069,"ogSiteName":670,"ogType":671,"canonicalUrls":3069,"schema":3070},"Complete DevOps is DevOps reimagined. Here's what that looks like","It's all systems go on Complete DevOps! We've re-imagined the scope of DevOps to bring development and operations work into a single application.","https://about.gitlab.com/blog/from-dev-to-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Complete DevOps is DevOps reimagined. 
Here's what that looks like\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2017-10-11\",\n      }",{"title":3067,"description":3068,"authors":3072,"heroImage":1527,"date":3073,"body":3074,"category":299,"tags":3075},[1609],"2017-10-11","\n\nUpdate: for the most recent status of complete DevOps please see our [Product Vision](/direction/) page.\n\nEarlier this week [we announced our #CompleteDevOps vision](/blog/gitlab-raises-20-million-to-complete-devops/). Let's take a closer look at what that means, and how it's different from traditional DevOps.\n\n\u003C!-- more -->\n\n## Traditional vs. Complete DevOps\n\nIn the early days of software development the process of taking an idea to production was slow, insecure and vulnerable to errors. DevOps emerged as a way to foster collaboration and create faster iteration cycles with greater quality and security. As it sits today, DevOps is a set of practices at the intersection of development and operations. It was a huge step forward.\n\n\u003Cimg src=\"/images/blogimages/dev-to-devops-intersection.png\" alt=\"Intersection of Dev and Ops\" style=\"width: 500px;\"/>{: .shadow}\n\nBut it didn't go far enough.\n\nEven with the [adoption of DevOps](/topics/devops/), serious challenges continue to exist. Developers and operators used to be separate groups with separate tools. The people are now closer together but their tools are still apart. This hinders dev and ops teams from working together. 
Trying to glue their tools together with traditional DevOps applications doesn't solve the fundamental problem of having separate applications.\n\n\u003Cimg src=\"/images/blogimages/dev-to-devops-tools.jpg\" alt=\"Distinct tools of developers and operators\" style=\"width: 800px;\"/>{: .shadow}\n\n## Why Complete DevOps?\n\n\u003Cimg src=\"/images/blogimages/dev-to-devops-union.png\" alt=\"Union of Dev and Ops\" style=\"width: 500px;\"/>{: .shadow}\n\nComplete DevOps reimagines the scope of tooling to include both developers and operations teams in one unified solution. This dramatically reduces friction, increases collaboration, and drives a competitive advantage. Doing away with context switching and having all the necessary information in one place closes the loop and enables a better understanding of each team's needs.\n\n\u003Cimg src=\"/images/blogimages/dev-to-devops-advantages.jpg\" alt=\"The advantages of Complete DevOps\" style=\"width: 800px;\"/>{: .shadow}\n\n To make our vision a reality, we're working on a number of new features and improving on existing ones. 
You can take an in-depth at some of these in our Head of Product [Mark Pundsack](/company/team/#MarkPundsack)'s [outline here](/blog/devops-strategy/), or watch the full presentation about our Complete DevOps vision below.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/5dhjw-TT964?start=1437\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n\nYou can also browse the slides at your leisure:\n\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vRVKUjMMa7M7lPV04_TMgfmd2Fj_kEQYW9-RvKAtKf799_Dwbfvos8diqinI-Uhm1uTwPYCdAPPzun1/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"1280\" height=\"749\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\nShare your thoughts, comments, and questions about #CompleteDevOps with us on [Twitter](https://twitter.com/gitlab)!\n",[728,9,726],{"slug":3077,"featured":6,"template":686},"from-dev-to-devops","content:en-us:blog:from-dev-to-devops.yml","From Dev To Devops","en-us/blog/from-dev-to-devops.yml","en-us/blog/from-dev-to-devops",{"_path":3083,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3084,"content":3090,"config":3096,"_id":3098,"_type":14,"title":3099,"_source":16,"_file":3100,"_stem":3101,"_extension":19},"/en-us/blog/from-idea-to-production-on-thousands-of-clouds",{"title":3085,"description":3086,"ogTitle":3085,"ogDescription":3086,"noIndex":6,"ogImage":3087,"ogUrl":3088,"ogSiteName":670,"ogType":671,"canonicalUrls":3088,"schema":3089},"From idea to production on thousands of clouds","Deliver cloud native applications in more places consistently at scale with GitLab and Gravity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679266/Blog/Hero%20Images/blue-lights.jpg","https://about.gitlab.com/blog/from-idea-to-production-on-thousands-of-clouds","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From 
idea to production on thousands of clouds\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ev Kontsevoy\"}],\n        \"datePublished\": \"2019-11-20\",\n      }",{"title":3085,"description":3086,"authors":3091,"heroImage":3087,"date":3093,"body":3094,"category":299,"tags":3095},[3092],"Ev Kontsevoy","2019-11-20","\nToday, deploying an application with GitLab is easier than ever: just create a Kubernetes cluster on your cloud of choice, connect it to GitLab with the Kubernetes integration, and Auto DevOps creates a full deployment pipeline for you.\n\nBut what if you need your app to run in two clusters in two separate regions? Ten clusters across multiple cloud providers? A hundred clusters and also on a fleet of self-driving trucks?\n\nAt [Gravitational](https://gravitational.com), we believe the future should not belong to a single cloud provider and developers should be able to run their applications anywhere with the same simplicity as having a single Kubernetes cluster.\n\nI am a huge fan of GitLab. I’ve had the great pleasure of getting to know much of the founding team [over the years](https://about.gitlab.com/blog/gitlab-joins-forces-with-gravitational/) and was happy to provide my [own contribution](https://gitlab.com/gitlab-org/gitlab-foss/issues/22864) to the community a while back. Today, I’m happy to share some thoughts on how to build with GitLab and deploy applications into dozens or even hundreds of cloud environments. \n\n## The rise of multicloud\n\nHow do you run applications in different data centers? Do you need to rewrite them from scratch? AWS may still be the dominant cloud provider, but cloud competitors are eating into their lead. It’s not just the big public cloud companies either. 
[Private cloud data centers](https://www.forbes.com/sites/jasonbloomberg/2019/02/02/have-private-clouds-finally-found-their-place-in-the-enterprise/#2f859685604f) are growing just as rapidly.\n\nMany companies that need to meet tough security and compliance requirements will require applications to run in their bare metal data centers. Running an application on an on-premises or even air-gapped data center adds additional complexity due to the hundreds or even thousands of dependencies in modern applications.\n\nGravitational has built Gravity, an open source [Kubernetes packaging solution ](https://gravitational.com/gravity/)that allows developers to build “cluster images” (similar to VM images) that can contain an entire Kubernetes cluster pre-loaded with multiple applications. You would use GitLab to go from idea to production, and Gravity to expand your production to anywhere in the world. \n\nStatements like, “I have snapshotted our entire production environment and emailed it to you, so you can run it in your private data center,” will not seem completely crazy.\n\nGravity uses standard, upstream CNCF-supported tooling for creating \"images\" of Kubernetes clusters containing the applications and their dependencies. The resulting files are called cluster images which are just .tar files.\n\nA cluster image can be used to recreate full replicas of the original environments for any deployment environment where compliance and consistency matter, i.e. in locked-down AWS/GCP/Azure environments or even in air-gapped server rooms. 
Each image includes all dependencies to spin up a full cluster, as well as the Gravity daemon that handles the most common operational tasks associated with Kubernetes applications, and it monitors and alerts human operators of problems.\n\n## Deploy with GitLab, scale with Gravity\n\n![Gravity dashboard](https://about.gitlab.com/images/blogimages/gravity-dashboard.png)\n\nDevelopers can leverage a GitLab repository as a single source of truth for rolling out a Kubernetes app and leverage [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) for continuous delivery.\n\nAny project of meaningful scale begins by defining an [epic](https://docs.gitlab.com/ee/user/group/epics/) with goals, milestones, and tasks. An [issue](https://docs.gitlab.com/ee/user/project/issues/#issues) is the main object for collaborating on ideas and planning work. GitLab’s [package and container registry](https://about.gitlab.com/stages-devops-lifecycle/package/) helps you manage and package dependencies. \n\n[The GitLab Kubernetes integration](https://docs.gitlab.com/ee/user/project/clusters/) allows customers to create Kubernetes clusters, utilize review apps, run pipelines, use web terminals, deploy apps, view pod logs, detect and monitor Kubernetes, and much more. For deploying a Kubernetes cluster in a single destination, GitLab provides everything you need from start to finish. \n\nHowever, if your customers need to run your application in their private data centers, they can use Gravity, which essentially copy/pastes the entire Kubernetes cluster environment you’ve built in GitLab. \n\n[Download](https://gravitational.com/gravity/download/) and set up the Gravity open source edition following our [quickstart guide](https://gravitational.com/gravity/docs/quickstart/). From Gravity, you can build a cluster image of your Kubernetes application. 
Gravity’s [documentation](https://gravitational.com/gravity/docs/overview/) will walk you through the steps required to build an image manifest that describes the image build, the installation process, and the system requirements for the cluster. \n\nYou can build empty Kubernetes cluster images to quickly create a large number of identical, production-ready Kubernetes clusters within an organization, or you can build a cluster image that also includes Kubernetes applications to distribute your application to third parties. \n\n## Next steps\n\nIf you want to learn more about working with Kubernetes, start with [Kubernetes 101](https://www.youtube.com/watch?v=rq4GZ_GybN8). You’ll learn how GitLab and Kubernetes interact at various touchpoints. And, if you’re looking for a way to port your applications to new environments, check out [Gravity](https://gravitational.com/gravity). \n\n## About the guest author\n\nEv is a co-founder and the CEO of Gravitational. Before Gravitational, he launched the on-demand OpenCompute servers at Rackspace. Prior to Rackspace, he co-founded Mailgun, the first email service built for developers. Ev has been a fighter against unnecessary complexity in software for 20 years. He abhors cars but loves trains and open source software that doesn't require an army of consultants to operate.\n\n## About Gravitational\n\n[Gravitational](https://gravitational.com) helps companies deliver cloud applications across cloud providers, on-premises environments, and even air-gapped server rooms. Products include Teleport for multi-cloud privileged access management that doesn't get in the way of developer productivity, and Gravity, a Kubernetes packaging solution that takes the drama out of on-prem deployments. Gravitational was founded in 2015 and recently [announced their Series A](https://gravitational.com/blog/gravitational-series-a-funding/). 
\n\nCover image by [Sharon McCutcheon](https://unsplash.com/@sharonmccutcheon) on [Unsplash](https://unsplash.com/photos/TMwHpCrU8D4)\n",[1041,9,231,1477,109,936],{"slug":3097,"featured":6,"template":686},"from-idea-to-production-on-thousands-of-clouds","content:en-us:blog:from-idea-to-production-on-thousands-of-clouds.yml","From Idea To Production On Thousands Of Clouds","en-us/blog/from-idea-to-production-on-thousands-of-clouds.yml","en-us/blog/from-idea-to-production-on-thousands-of-clouds",{"_path":3103,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3104,"content":3110,"config":3115,"_id":3117,"_type":14,"title":3118,"_source":16,"_file":3119,"_stem":3120,"_extension":19},"/en-us/blog/future-proof-your-developer-career",{"title":3105,"description":3106,"ogTitle":3105,"ogDescription":3106,"noIndex":6,"ogImage":3107,"ogUrl":3108,"ogSiteName":670,"ogType":671,"canonicalUrls":3108,"schema":3109},"Future-proof your developer career","Roles are changing and AI is coming. We asked 14 DevOps practitioners, analysts, and GitLab execs how to future-proof your career.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679588/Blog/Hero%20Images/future-of-software-future-proof-your-career.png","https://about.gitlab.com/blog/future-proof-your-developer-career","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Future-proof your developer career\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-10-30\",\n      }",{"title":3105,"description":3106,"authors":3111,"heroImage":3107,"date":3112,"body":3113,"category":679,"tags":3114},[851],"2020-10-30","\n\n_This is the fourth and final part of our series on the future of software development. Part one examined [how the software developer role is changing](/blog/software-developer-changing-role/). 
Part two highlighted [“future” technologies likely to impact the way software is created](/blog/how-tomorrows-tech-affects-sw-dev/). Part three looked at [the role artificial intelligence (AI) will play in software development](/blog/ai-in-software-development/)._\n\nChanging roles, emerging technologies, and the promise (or threat) of artificial intelligence are colliding, creating a critical question for software developers: how should you future-proof your career?\n\nAnyone in the technology industry knows change is both swift and expected – remember [Moore’s Law](https://www.investopedia.com/terms/m/mooreslaw.asp)? But there’s change and then there’s a “big C” *Change* that would impact skills and potentially careers. The [World Economic Forum, writing on the Pluralsight blog](https://www.pluralsight.com/blog/career/tech-in-2025), shared a worrisome observation about the future: “Across nearly all industries, the impact of technological and other changes is shortening the shelf-life of employees’ existing skill sets... ”\n\nSo what skills will be sufficient to navigate the future? We asked 14 [DevOps](/topics/devops/) practitioners, analysts, and GitLab execs for their best advice.\n\n## Embrace the soft skills\n\nIn our 2020 [Global DevSecOps Survey](/developer-survey/), developers, security pros, ops team members, and testers were unanimous in their choice of the most important skills for the future: communication and collaboration. It’s not particularly surprising – DevOps team members are increasingly finding themselves working even more closely together and often in different or new areas of the company. Communication and collaboration in those cases can be the difference between success and failure.\n\n“You can’t have one brain that knows it all,” explains [Darwin Sanoy](/company/team/#DarwinJS), senior solutions architect, Americas, at GitLab. 
“You need communication and collaboration to work together.”\n\nOne way developers can fine-tune collab skills is to use their open source skills within their organizations, a practice known as “inner sourcing,” says [Jose Manrique Lopez de la Fuente](https://www.linkedin.com/in/jose-manrique-lopez-de-la-fuente-b869884/), CEO at Bitergia, and also a [GitLab Hero](/community/heroes/). “You’re not doing open source alone,” Manrique says. “There are hundreds of developers worldwide also doing it. So, with those skills I learned working with other developers, how can I be transparent with people who are not only connected to my team? How can I get more involved with what’s going on in the company?” The more developers practice this skill, the easier it will get, he predicts.\n\n## It’s not just about tech\n\nAlthough this seems counter-intuitive, future-proofing your career doesn’t necessarily mean boning up on new technologies. In [our survey](/developer-survey/), 28% of developers said [AI was an important skill to know for the future (and they’re probably not wrong)](https://www2.deloitte.com/us/en/insights/focus/signals-for-strategists/ai-assisted-software-development.html), but most experts think it’s not wise to place all your energy in just a single specialty.\n\n“It’s best if you migrate your career from specialty to specialty trying to ride the wave,” Darwin says. “Take a look at what is picking up momentum but is not bleeding edge yet.” GitLab’s director of product management, CI/CD [Jason Yavorska](/company/team/#jyavorska) suggests polishing up the basics. “You want solid tech skills like trouble-shooting, a current knowledge of modern stacks and a lot of basic things,” Jason explains. 
“You want to be a little bit more of a generalist than an expert in one field.”\n\nThis is definitely a time to take step back and look at the bigger picture, suggests [Philip Lamb](https://www.linkedin.com/in/philliplamb/), global partner senior solutions architect – DevOps at Red Hat. He’s also a proponent of the power of generalization. “Focus less on specific tooling, software, and instead focus more on process and establishing a clear understanding of the sea changes DevOps brings,” he says. And don't forget that DevOps is going to look different for every organization.\n\n## Choose wisely\n\nBut if there’s one thing to keep in mind, above anything else, it’s this: “Avoid what AI is going to be good at,” Jason says. Forrester Research (and many others) think AI [could be creating code in 10 years or less](/blog/ai-in-software-development/). “AI and machine learning could be the most disrupting things to come to your career,” he explains. “If you’ve built your job out of basic things you could find yourself redundant. Focus on things you (as a human) are capable of.”\n",[9,813,1158],{"slug":3116,"featured":6,"template":686},"future-proof-your-developer-career","content:en-us:blog:future-proof-your-developer-career.yml","Future Proof Your Developer Career","en-us/blog/future-proof-your-developer-career.yml","en-us/blog/future-proof-your-developer-career",{"_path":3122,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3123,"content":3129,"config":3134,"_id":3136,"_type":14,"title":3137,"_source":16,"_file":3138,"_stem":3139,"_extension":19},"/en-us/blog/gary-gruver-interview-post",{"title":3124,"description":3125,"ogTitle":3124,"ogDescription":3125,"noIndex":6,"ogImage":3126,"ogUrl":3127,"ogSiteName":670,"ogType":671,"canonicalUrls":3127,"schema":3128},"IT executives! 
Take the lead in DevOps transformations","Gary Gruver, author of \"Starting and Scaling DevOps in the Enterprise,\" shares his thoughts on the role of executives in a DevOps transformation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680072/Blog/Hero%20Images/gary-gruver-cover.jpg","https://about.gitlab.com/blog/gary-gruver-interview-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"IT executives! Take the lead in DevOps transformations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-06-14\",\n      }",{"title":3124,"description":3125,"authors":3130,"heroImage":3126,"date":3131,"body":3132,"category":679,"tags":3133},[702],"2018-06-14","\n\nThe changes in both workflow and culture during a DevOps transformation highlight the need for IT executives to guide development and operations teams. [Gary Gruver](http://www.garygruver.com), the renowned DevOps consultant, discovered that executives are essential in setting up teams to adopt a DevOps model successfully. 
Ahead of his June 19 webcast, Gary Gruver sat down with GitLab to share his thoughts on the greatest challenges that executives encounter and to provide tactical steps to help executives support their teams.\n\n## The role of an executive in the DevOps transformation is leading it.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://drive.google.com/file/d/1a9sGyFz9fW9zeXOa7MJGib8fUN4ZrIsA/preview\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n## One of the biggest challenges is getting everybody on the same page.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://drive.google.com/file/d/1d5w4GRRbHA2poHJocUT-4cBfW2KBEsw2/preview\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## One of the biggest things executives need to do to prepare their teams is giving a common view of the inefficiencies for the entire deployment pipeline.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://drive.google.com/file/d/1iseA7ifwF9qIkXDNQun9j0ps-b4_QfuK/preview\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## In terms of culture, executives can help teams adjust to the changes that are coming. 
Their role is to be that of an investigative reporter.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://drive.google.com/file/d/1788pyYj0QGx8z29YaP6PoseS-OJI_u_d/preview\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## When I work with executives, I always start with, \"What are the objectives about your software development processes?\"\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://drive.google.com/file/d/1ainmogqtovRD0QKq_gJW9XT3_Lii2Hbm/preview\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nJoin Gary Gruver for a webcast on June 19 to discover ways to analyze your current deployment pipeline to target your first improvements on the largest inefficiencies in software development and deployment, and [download your free copy](/resources/scaling-enterprise-devops/) of \"Starting and Scaling DevOps in the Enterprise.\"\n",[9],{"slug":3135,"featured":6,"template":686},"gary-gruver-interview-post","content:en-us:blog:gary-gruver-interview-post.yml","Gary Gruver Interview Post","en-us/blog/gary-gruver-interview-post.yml","en-us/blog/gary-gruver-interview-post",{"_path":3141,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3142,"content":3147,"config":3153,"_id":3155,"_type":14,"title":3156,"_source":16,"_file":3157,"_stem":3158,"_extension":19},"/en-us/blog/generic-semantic-version-processing",{"title":3143,"description":3144,"ogTitle":3143,"ogDescription":3144,"noIndex":6,"ogImage":928,"ogUrl":3145,"ogSiteName":670,"ogType":671,"canonicalUrls":3145,"schema":3146},"SemVer versioning: how we handled it with linear interval arithmetic","SemVer versioning made it difficult to automate processing. 
We turned to linear interval arithmetic to come up with a unified, language-agnostic semantic versioning approach.","https://about.gitlab.com/blog/generic-semantic-version-processing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"SemVer versioning: how we handled it with linear interval arithmetic\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Julian Thome\"}],\n        \"datePublished\": \"2021-09-28\",\n      }",{"title":3143,"description":3144,"authors":3148,"heroImage":928,"date":3150,"body":3151,"category":875,"tags":3152},[3149],"Julian Thome","2021-09-28","\nThe [semantic versioning (SemVer) specification](https://semver.org/) can be\nconsidered the de-facto standard for tracking software states during its\nevolution. Unfortunately, in reality many languages/ecosystems practice \"SemVer versioning\" and have not adopted\nthe standard as-is; instead we can find many different semantic versioning\nflavors that are not necessarily compatible with the original SemVer spec. SemVer Versioning has\nled to the creation of a variety of different semantic versioning schemes.\n\nGitLab provides a [Dependency Scanning (DS)](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\nfeature that automatically detects vulnerabilities in the dependencies of a\nsoftware project for a variety of different languages. DS relies on the\n[GitLab Advisory Database](https://gitlab.com/gitlab-org/security-products/gemnasium-db)\nthat is updated on a daily basis providing information about\nvulnerable packages that is expressed in the package-specific (native)\nsemantic version dialect. 
GitLab also recently launched an [Open Source Edition](https://gitlab.com/gitlab-org/advisories-community) of the GitLab Advisory Database.\n\nAt GitLab we use a semi-automated process for advisory generation: we extract\nadvisory data that includes package names and vulnerable versions from\ndata-sources such as [NVD](https://nvd.nist.gov/) and generate advisories that\nadhere to the GitLab advisory format before they are curated and stored in our\n[GitLab Advisory Database](https://gitlab.com/gitlab-org/security-products/gemnasium-db).\n\nThe plethora of SemVer versioning in the wild posed a major\nchallenge for the level of automation we could apply in the advisory generation\nprocess: the different semantic version dialects prevented us from building\ngeneric mechanisms around version matching, version verification (i.e., the\nprocess of verifying whether or not versions are available on the relevant package\nregistry), fixed version inference etc. Moreover, since advisory generation\nrequires us to extract and update advisory data on scale from data-sources with\nhundreds of thousands vulnerability entries, translating and/or verifying\nversions by hand is not a viable, scalable solution.\n\nHaving a generic method to digest and process a variety of different SemVer versioning dialects was an important building block for automating large parts of the advisory generation process. 
This led to the development of\n[semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects), a\nutility that helps processing semantic versions in a generic, language-agnostic manner which\nhas been recently open-sourced (MIT) and [published on rubygems.org](https://rubygems.org/gems/semver_dialects).\n\n## Understand the SemVer spec\n\nThe SemVer spec is the de-facto standard for tracking states of software projects during their evolution\nby associating unique, comparable version numbers to distinct states, and by\nencoding semantic properties into the semantic version strings so that a version\nchange implicitly conveys information about the nature of the change.  \n\nA semantic version consists of a prefix (version core) and a suffix that hold\npre-release and/or build information. A version core consists of three numeric\ncomponents that are delimited by `.`:\n\n* major: backwards-incompatible changes\n* minor: new backwards-compatible functionality\n* patch: backwards-compatible bug fixes\n\nConsidering a software project using SemVer, with two releases `1.0.0` and\n`1.0.1`, by just looking at the change applied to the semantic version strings,\nit is clear that `1.0.1` is a newer (more recent) release of the software, whereas version\n`1.0.0` is an older release. In addition, the version number `1.0.1`\nrepresents an improved state of the software as compared to version `1.0.0` which contained a bug\nthat has been fixed in version `1.0.1`. This fix is signalled by the higher number of the patch version component.\n\nSemantic version processing is particularly useful in the context of [Dependency Scanning (DS)](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/). 
DS is the process of automatically detecting (and potentially fixing)\nvulnerabilities related to the dependencies of a software project: dependencies\nof a software project are checked against a set of configuration files (so\ncalled advisories) that contain information about vulnerable dependencies;\nadvisories usually include the versions of the vulnerable dependency.\nVulnerable versions are usually expressed in terms of version intervals: for example [this out-of-bounds read vulnerability for the Python tensorflow package](https://nvd.nist.gov/vuln/detail/CVE-2021-29560) contains information about the vulnerable version by listing the four version intervals below:\n\n1. up to 2.1.4\n1. from 2.2.0 up to 2.2.3\n1. from 2.3.0 up to 2.3.3\n1. from 2.4.0 up to 2.4.2\n\nWhile SemVer is very concise and clear about the syntax and semantic of\nsemantic versions, it does not specify how to express and represent semantic\nversion constraints. In addition, SemVer is purposefully simplistic to foster\nits adoption. In practice it seems as if many ecosystems required features that\ngo beyond SemVer which led to the development of many SemVer versioning flavours as well\nas a variety of different native constraint matching syntaxes, some of which\ndeviate from the official SemVer specification.  Depending on the ecosystem you\nare working with, the same semantic version string may be treated/interpreted\ndifferently: for example both Maven and pip/PyPI treat versions `1.2.3.SP`\ndifferently because pip/PyPI lacks the notion of an `SP` post release. 
Apart\nfrom that, `1.2.3.SP` cannot be considered a valid semantic version according\nto the SemVer spec.\n\nToday we have a variety of different semantic versioning schemes:\n\n- `gem`: [gem requirement](https://guides.rubygems.org/specification-reference/#add_runtime_dependency)\n- `maven`: [Maven Dependency Version Requirement Specification](https://maven.apache.org/pom.html#Dependency_Version_Requirement_Specification)\n- `npm`: [node-semver](https://github.com/npm/node-semver#ranges)\n- `php`: [PHP Composer version constraints](https://getcomposer.org/doc/articles/versions.md#writing-version-constraints)\n- `pypi`: [PEP440](https://www.python.org/dev/peps/pep-0440/#version-specifiers)\n- `go`: [go semver](https://godoc.org/golang.org/x/tools/internal/semver)\n- `nuget`: [NuGet semver](https://docs.microsoft.com/en-us/nuget/concepts/package-versioning)\n- `conan`: [node-semver flavour](https://github.com/npm/node-semver#ranges)\n\nThis SemVer versioning fragmentation limited the degree of automation we could apply to our\nadvisory extraction/generation process. 
This limitation motivated the\ndevelopment of a methodology and tool [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) that helps to digest and process semantic versions in a language agnostic way and, hence, helps to reduce the manual advisory curation effort.\n\nBelow, you can see an excerpt of the advisory information that is extracted and\ngenerated by our semi-automated advisory generation process:\n\n```yaml\n# ...\naffected_range: \">=1.9,\u003C=2.7.1||==2.8\"\nfixed_versions:\n- \"2.7.2\"\n- \"2.8.1\"\nnot_impacted: \"All versions before 1.9, all versions after 2.7.1 before 2.8, all versions\n  after 2.8\"\nsolution: \"Upgrade to versions 2.7.2, 2.8.1 or above.\"\n# ...\n```\n\nIn the excerpt above:\n\n- `affected_range` denotes the range of affected versions which is the machine-readable, native syntax used by the package manager/registry (in this case pypi).\n- `fixed_versions` denotes the concrete versions when the vulnerability has been fixed.\n- `not_impacted` provides a textual description of the versions that are not affected.\n- `solution` provides information about how to remediate the vulnerability.\n\nTo be able to extract and generate advisories like the one illustrated\nabove in a language/ecosystem agnostic way, we implemented and open-sourced a\ngeneric semantic version representation and processing approach called\nsemver_dialects.\n\nIn the advisory excerpt above, the `affected_range` field contains the version\nconstraints in the native constraint syntax (in this case PyPI for Python);\n`fixed_versions` can be inferred by inverting the `affected_version` (i.e.,\nnon-affected versions) and by selecting the first available  version that falls\ninto the range of non-affected versions from the native package registry; this step\nrequires our approach to be able to parse the native semantic version syntax.\n\nIn order to deal with SemVer versioning and automatically process and generate the fields 
according to this\ndescription, our [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) implementation had to satisfy the following requirements:\n\n1. Provide a unified interface to the language specific dialects.\n1. Match semantic versions in a language agnostic way.\n1. Invert ranges.\n1. Cope with scattered, non-consecutive ranges.\n1. Parse and produce different version syntaxes.\n1. Parse and match versions/constraints in a best-effort manner.\n\n## SemVer versioning representation\n\nFirst, we need a generic representation of a semantic version to start with. We\nassume that a semantic version is composed of prefix and suffix where the\nprefix contains segments for major, minor and patch version components as defined in the\nSemVer specification. The suffix may hold additional information about pre/post\nreleases etc. As illustrated below, the major, minor and patch prefix segments\ncan be accessed by means of the corresponding methods.\n\n``` ruby\ns1 = SemanticVersion.new('1.2.3')\nputs \"segments: #{s1}\"\n# segments: 1:2:3\nputs \"major #{s1.major}\"\n# major 1\nputs \"minor #{s1.minor}\"\n# minor 2\nputs \"patch #{s1.patch}\"\n# patch 3\n```\n\nWe cannot generally assume that all provided versions we would like to process\nfully adhere to the SemVer spec which requires a version prefix (core) to\nconsist of three segments: major, minor and patch. Hence, per default, we\nremove redundant, trailing zeros from the prefix to ensure that\n`2.0.0`, `2.0` and `2` are considered identical.\n\n[Semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) translates language specific version suffixes into numeric values. This process\ncan be described as version normalization.  
For example the Maven (pre-)release\ncandidate version `2.0.0.RC1` can be translated to a numeric representation\nwith prefix: `2` and suffix `-1:1` by mapping `RC` to a numeric value (in this\nexample `-1`) and, thus, rendering it numerically comparable.\n\nAfter this normalization step, semantic version matching for two versions `vA`\nand `vB` can be implemented by simply numerically comparing their segments in a\npairwise fashion.  For unknown suffices that are not mappable to the numeric\ndomain, we use lexical matching as a default fallback strategy.\n\nIn summary, comparing two semantic versions is a two-step process:\n\n1. Normalization: Extend both semantic versions to have the same prefix length and suffix\n   lengths by appending zeros.\n1. Comparison: Iterate over segments and compare each of them numerically.\n\nFor example, after normalizing the versions `2.0.0.RC1` and `2.0.0` to `2:-1:1`\nand `2:0:0`, respectively, we can iterate over the segments (delimited by\n`:` in the example) which we can compare numerically to successfully identify\n`2:-1:1` as being the smaller (release-candidate) version in comparison to\n`2:0:0`.\n\n## Constraint syntax - everything is a linear interval\n\nTranslating semantic versions into a generic representation makes them\nnumerically comparable which is already useful but not sufficient to express SemVer versioning constraints in a language-agnostic fashion.\n\nFor representing semantic version constraints in a generic way,\nwe rely on linear intervals.  For the purpose of this blog, we define an interval as an ordered pair of two\nsemantic versions which we are referring to as lower and upper\nbounds (or cuts). For the sake of simplicity, for the remainder of\nthis section we will use simple integers as examples for lower and upper bounds, respectively.\n\nLinear intervals capture semantic version ranges symbolically which makes them\nvery versatile and space efficient. 
At the same time, we can rely on\nwell-established mathematical models borrowed from linear interval arithmetic\nthat enable us to translate/express any type of constraint in terms of\nmathematical set operations on intervals.\n\nIn the table below you can find all the different types of intervals we\nconsidered to model semantic version constraints and a corresponding\ndescription where `L` stands for left, `R` stands for right with `a` and `b`\nbeing the lower and upper bounds, respectively.\n\n| Type of interval | Example                    | Description                               |\n| ---------------- | ---------------------------| ----------------------------------------- |\n| LR-closed        |  `[a,b]: x >= a, x \u003C= b`   | all versions starting from a until b      |\n| L-open R-closed  |  `(a,b]: x > a, x \u003C= b`    | all versions after a until b              |\n| L-closed R-open  |  `[a,b): x >= a, x \u003C b`    | all versions starting from a before b     |\n| LR-open          |  `(a,b): x > a, x \u003C b`     | all versions between a and b              |\n| L-unbounded      |  `(-inf,b]: x \u003C= b`        | all versions until b                      |\n| R-unbounded      |  `[a,+inf): x >= a`        | all versions starting from a              |\n\nBelow you can see example output for the different types of ranges from\n[semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) where we are using the `VersionParser` component to generate\nlinear intervals from version constraints where `,` denotes a logical\nconjunction: e.g., `>=1, \u003C=2` denotes the set of integers that are greater than or equal\nto 1 *and* smaller than or equal to two, i.e., all integers/versions numbers starting from 1 until 2.\n\n``` ruby\nputs VersionParser.parse(\">=1, \u003C=2\")\n# [1,2]\nputs VersionParser.parse(\">1, \u003C=2\")\n# (1,2]\nputs VersionParser.parse(\">=1, \u003C2\")\n# [1,2)\nputs VersionParser.parse(\">1, 
\u003C2\")\n# (1,2)\nputs VersionParser.parse(\"\u003C=2\")\n# (-inf,2]\nputs VersionParser.parse(\">=1\")\n# [1,+inf)\n```\n\nFor solving SemVer versioning constraints, we use linear interval arithmetic\nwhich is explained in-depth in the text-book \"[Introduction to Interval\nAnalysis](https://epubs.siam.org/doi/book/10.1137/1.9780898717716?mobileUi=0&).\"\n\nAs mentioned earlier, for our purposes, we define an interval as an ordered\npair of two semantic versions (lower and upper bound) that represents the set\nof all those semantic versions that are enclosed by lower and upper bounds.\nGiven that intervals are sets, we can perform standard set operations on\nthem.\n\nIn the context of advisory generation, there are three operations we require to\nsatisfy all the requirements we defined earlier: Intersection, Union and Complement.\nThe operations are explained in more detail in the sections below.\n\nFor the remainder of this section, we explain interval operations, using two\nexample intervals `X` and `Y` with `X=[x_l, x_u]` and `Y=[y_l, y_u]` where\n`x_l`, `x_u` denote the lower and upper bounds for `X`, and `y_l`, `y_u` denote\nthe lower and upper bounds for `Y`, respectively. In addition, we are using the\n`min` and `max` functions, where `max(a,b)` returns the largest and `min(a,b)`\nreturns the smallest value of the parameters `a` and `b`; the ∅ symbol denotes\nthe empty set.\n\n### Intersection\n\nThe recipe below illustrates how the intersection (`X` ∩ `Y`) can be computed.\n\n`X` ∩ `Y` = if `X` and `Y` have points in common `[max(x_l,y_l), min(x_u,y_u)]` else ∅\n\nIntuitively, the intersection extracts the overlap (if any) from the two\nintervals `X` and `Y`.\n\nThe code snippet below shows how the intersection is computed in [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) for the two examples:\n\n1. `[2,5]` ∩ `[3,10]`\n1. `[2,5]` ∩ `[7,10]`\n\n``` ruby\n# 1. 
[2,5] ∩ [3,10] = [3, 5]\nputs VersionParser.parse(\">=2, \u003C=5\").intersect(VersionParser.parse(\">=3, \u003C=10\"))\n# [3,5]\n\n# 2. [2,5] ∩ [7,10] = ∅\nputs VersionParser.parse(\">=2, \u003C=5\").intersect(VersionParser.parse(\">=7, \u003C=10\"))\n# empty\n```\n\nThe intersection operation is useful to perform semantic version matching\nfor checking whether semantic version falls into a certain version interval\nor range. For instance we may want to check whether version `1.2.3` satisfies\nthe constraint `>=1.0.0, \u003C1.2.4`. In the context of [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), these types of\nconstraints are very common. The problem `1.2.3` ∈ `[1.0.0, 1.2.4)` can be\ntranslated to a set intersection: `[1.2.3, 1.2.3]` ∩ `[1.0.0, 1.2.4)` =\n`[1.2.3, 1.2.3]`  which returns a non-empty set and, hence, tells us that\nversion `1.2.3` satisfies the given version constraints.\n\nIn the context of our advisory generation process, we use intersection to\ncross-validate versions from vulnerability reports (CVEs) with versions of the\navailable package that are available on the package registry that serves it.\n\nFor convenience, as mentioned earlier, [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) also supports grouping\nintervals into ranges by means of the `VersionRange` class. A range is a set of intervals\nwhich we denote with `{I0, I1, ..., IN}` where `I` denotes version intervals\ndelimited by `,` which can be interpreted as a union operator (explained in the next section).\n\nA range is a set of intervals. In the example below, we first create a range\n`r1` to which we are adding two intervals: `r1 = {[2.2.1, 5.1.2], (3.1, 10)}`.\nAfter that, there is a check for an overlap (i.e., an intersection) between\n`r1` and `[0, 2.1)` (no overlap) as well as `[5.5, 5.5]` (overlap). 
You can see\nthe output of [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) in the excerpt below.\n\n``` ruby\nr1 = VersionRange.new\nr1.add(VersionParser.parse(\">=2.1.2, \u003C=5.1.2\"))\nr1.add(VersionParser.parse(\">3.1, \u003C10\"))\n\nputs \"[0,2.1) in #{r1}? #{r1.overlaps_with?(VersionParser.parse(\">=0, \u003C2.1\"))}\"\n# [0,2.1) in [2.1.2,5.1.2],(3.1,10)? false\nputs \"[5.5,5.5] overlap with #{r1}? #{r1.overlaps_with?(VersionParser.parse(\"=5.5\"))}\"\n# [5.5,5.5] overlap with [2.1.2,5.1.2],(3.1,10)? true\n```\n\n### Union\n\nThe recipe below illustrates how the union (`X` ∪ `Y`) can be computed.\n\n`X` ∪ `Y` = if `X` and `Y` have points in common `{[min(x_l,y_l), max(x_u,y_u)]}` else `{X,Y}`\n\nThe code snippet below shows how the union can be computed with\n [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) for the two examples:\n1. `[2,5]` ∪ `[3,10]` = `{[2,5], [3,10]}` = `{[2, 10]}`\n2. `[2,5]` ∪ `[7,10]` = `{[2,5], [7,10]}`\n\nWith the union operator, we can collapse version intervals in case they have an\noverlap/intersection; otherwise, if `X` and `Y` are disjoint, we add their\nintervals directly to the range.\n\n``` ruby\n# 1. [2,5] ∪ [3,10] = [2, 10]\nputs \"union: #{VersionParser.parse(\">=2, \u003C=5\").union(VersionParser.parse(\">=3, \u003C=10\"))}\"\n# union: [2,10]\n\n# Version ranges perform union two for the purpose of automatically collapsing\n# intervals (if possible)\nr1 = VersionRange.new\nr1.add(VersionParser.parse(\">=2, \u003C=5\"))\nr1.add(VersionParser.parse(\">=3, \u003C=10\"))\nputs \"r1: #{r1}\"\n# union: [2,5],[3,10]\nputs \"r1 collapsed: #{r1.collapse}\" # creates the union between intervals\n# r1 collapsed: [2,10]\n\n# 2. 
[2,5] ∪ [7,10] = {[2, 10], [7,10]}\nr2 = VersionRange.new\nr2.add(VersionParser.parse(\">=2, \u003C=5\"))\nr2.add(VersionParser.parse(\">=7, \u003C=10\"))\nputs \"r2: #{r2}\"\n# r2: [2,5],[7,10]\n```\n\nIn the context of [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), vulnerability data usually lists a set of intervals for\ndependencies that are susceptible to a given vulnerability like the [tensorflow example](https://nvd.nist.gov/vuln/detail/CVE-2021-29560) in the introduction where the following versions are affected:\n\n1. up to 2.1.4\n1. from 2.2.0 up to 2.2.3\n1. from 2.3.0 up to 2.3.3\n1. from 2.4.0 up to 2.4.2\n\nThis list of intervals can be represented as a single range (`VersionRange`) by\ncombining all of the mentioned version intervals through the union operator.\n\nIn the Ruby code example above, you can also see the `collapse` method which is\ninvoked on a `VersionRange` object. This method automatically collapses\noverlapping intervals that are included in the same `VersionRange` to eliminate\nredundant intervals. Collapsing the range `{[2, 5], [3, 10]}` yields a new range\n`{[2,10]}` with only one interval while preserving semantic equivalence.\n\n### Complement\n\nThe recipe below, illustrates how the relative complement (`X` - `Y`) can be computed.\n\n`X` - `Y`: `Z` := `X` ∩ `Y`;\n        if (`z_l` > `x_l` && `z_u` \u003C `x_u`)\n          `{[x_l, z_l),(z_u, x_u]}`\n        else if (`x_l` \u003C `z_l`)\n          `{[x_l, z_l)}`\n        else if (`x_u` > `z_u`)\n          `{(z_u, x_u]}`\n\nIntuitively, this recipe computes the intersection (`Z`) between `X` and `Y` and\nremoves all elements from `X` that are included in the intersection. The\nexamples below illustrate the recipe:\n\n1. `[3, 5]` - `[1, 3]`: with `Z` = `[3, 3]` we get `{(3, 5]}` which is\n   equivalent to `{[4, 5]}`\n1. `[3, 10]` - `[10, 11]`: with `Z` = `[10, 10]` we get `{[3, 10)}` which is equivalent to `{[3, 9]}`\n1. 
`[1, 5]` - `[2, 2]`: with `Z` = `[2, 2]` we get `[1, 2), (2, 5]` which is equivalent to `{[1, 1], [3, 5]}`\n\nWith the recipe above, we can also compute the absolute complement `X` - `Y` by\nassuming `X` is the universe that captures the entirety of all possible values:\n`(-inf,+inf)`. The universal complement can be defined as `~X` = `(-inf,+inf)` - `X`.\n\nWith [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects), the absolute complement can be computed by means of the\n`invert` method as illustrated in the example below.\n\n``` ruby\n# example 1: ~[1,3] = {(-inf,0],[4, +inf)} = {(-inf,1),(3,+inf)}\nr1 = VersionRange.new\nr1.add(VersionParser.parse(\">=1, \u003C=3\"))\nputs r1.invert\n# (-inf,1),(3,+inf)\n\n# example 2: ~{[2.1.2, 5.1.2], (3.1, 10)} = ~{[2.1.2, 10)} = {(-inf,2.1.2),[10,+inf)}\n{(-inf,0],[4, +inf)} = {(-inf,1),(3,+inf)}\nr2 = VersionRange.new\nr2.add(VersionParser.parse(\">=2.1.2, \u003C=5.1.2\"))\nr2.add(VersionParser.parse(\">3.1, \u003C10\"))\nputs r2.collapse.invert\n# (-inf,2.1.2),[10,+inf)\n```\n\nIn the context of [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), this functionality is used to automatically infer\nnon-affected versions from the affected versions information: if `[1, 3]`\nrepresents all the affected versions of a vulnerable package, its complement\n`{(-inf,1),(3,+inf)}`, per definition, captures only the unaffected version. 
In\nour advisory generation process we cross-validate the version information of\npackages from the package registries with this information about unaffected versions to check whether or not unaffected packages are available; if this is the case, we add the corresponding remediation information to the generated advisories.\n\n## Version Translation\n\nLinear interval arithmetic provides us with all the means necessary to\nrepresent and solve SemVer versioning constraints in a language-agnostic way.\nHowever, in order to leverage the generic representation, we have to be able to\nautomatically translate the native semantic version dialects into the generic\nrepresentation and vice versa. The details of this translation functionality\nare provided below.\n\n[Semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) offers a `VersionTranslator` class. The `VersionTranslator` takes a native semantic version constraint, and translates\nit into an intermediate string representation that can then be translated into a range (`VersionRange`) by using the `VersionParser`. 
Currently semver_dialects supports all the syntax listed below by invoking\n`translate_\u003Cpackage_type>` where `\u003Cpackage_type>` is one of:\n\n- `gem`: [gem requirement](https://guides.rubygems.org/specification-reference/#add_runtime_dependency)\n- `maven`: [Maven Dependency Version Requirement Specification](https://maven.apache.org/pom.html#Dependency_Version_Requirement_Specification)\n- `npm`: [node-semver](https://github.com/npm/node-semver#ranges)\n- `packagist`: [PHP Composer version constraints](https://getcomposer.org/doc/articles/versions.md#writing-version-constraints)\n- `pypi`: [PEP440](https://www.python.org/dev/peps/pep-0440/#version-specifiers)\n- `go`: [go semver](https://godoc.org/golang.org/x/tools/internal/semver)\n- `nuget`: [NuGet semver](https://docs.microsoft.com/en-us/nuget/concepts/package-versioning)\n- `conan`: [node-semver flavour](https://github.com/npm/node-semver#ranges)\n\nThe example below illustrates how the [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects)' `VersionTranslator` can\nbe used to translate native version syntax to an intermediate representation.\nThe `VersionTranslator` parses the native version syntax and translates it into\na common format. In the example below, you can further see that both\nnative, semantically equivalent but syntactically different version strings for\npackagist and maven are translated into a common format: a string array\nwhere a single array entry represents a conjunct of the semantic version\nconstraints. 
This translation step removes all language-specific features\nfrom the native semantic version constraints.\n\n``` ruby\n# native packagist version constraint syntax\nvs_packagist = \"\u003C2.5.9||>=2.6.0,\u003C2.6.11\"\n# native maven version constraint syntax\nvs_maven = \"(,2.5.9),[2.6.0,2.6.11)\"\n\n# translate\nputs VersionTranslator.translate_packagist(vs_packagist).to_s\n# [\"\u003C2.5.9\", \">=2.6.0 \u003C2.6.11\"]\nputs VersionTranslator.translate_maven(vs_maven).to_s\n# [\"\u003C2.5.9\", \">=2.6.0 \u003C2.6.11\"]\n```\n\nThis common format can then be translated to a version interval by means of\n`VersionParser` and `VersionRange`. The example below illustrates how the\nversion interval `constraint` is generated by iterating over the array elements\nof our intermediate representation, translating them to intervals and adding\nthese intervals to the `VersionRange` object `constraint`. At the end of the\nexcerpt below, we check whether version `1.0.0` satisfies the version\nconstraint `\u003C2.5.9||>=2.6.0,\u003C2.6.11` which correctly yields `true`.\n\n``` ruby\n# translate native maven version constraint to range of interval\nconstraint = VersionRange.new\nVersionTranslator.translate_maven(vs_maven).each do |version_string|\n  constraint \u003C\u003C VersionParser.parse(version_string)\nend\n\nputs constraint.overlaps_with?(VersionParser.parse('=' + '1.0.0'))\n# true\n```\n\n## Wrapping it up\n\nWe discussed the fragmentation of SemVer versioning which poses a challenge\nwhen building automation around semantic version processing for\nmulti-language/ecosystem applications. In this blog post, we used our internal\nsemi-automated process for advisory generation as an example.\n\nWe illustrated how we tackled the above-mentioned challenge by building a\ngeneric/language-agnostic semantic version approach based on linear interval\narithmetic. 
All mechanisms discussed in this blog post are implemented in the open-sourced (MIT)\n[semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) implementation and published on [rubygems.org](https://rubygems.org/gems/semver_dialects).\n",[875,9,682],{"slug":3154,"featured":6,"template":686},"generic-semantic-version-processing","content:en-us:blog:generic-semantic-version-processing.yml","Generic Semantic Version Processing","en-us/blog/generic-semantic-version-processing.yml","en-us/blog/generic-semantic-version-processing",{"_path":3160,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3161,"content":3167,"config":3174,"_id":3176,"_type":14,"title":3177,"_source":16,"_file":3178,"_stem":3179,"_extension":19},"/en-us/blog/get-ready-for-commit",{"title":3162,"description":3163,"ogTitle":3162,"ogDescription":3163,"noIndex":6,"ogImage":3164,"ogUrl":3165,"ogSiteName":670,"ogType":671,"canonicalUrls":3165,"schema":3166},"How to get the most out of GitLab Commit","We’re taking over the Williamsburg neighborhood of Brooklyn and opening up our world to you. Here’s everything you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664134/Blog/Hero%20Images/gitlabcommitbrooklyn.png","https://about.gitlab.com/blog/get-ready-for-commit","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get the most out of GitLab Commit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily Kyle\"}],\n        \"datePublished\": \"2019-09-13\",\n      }",{"title":3162,"description":3163,"authors":3168,"heroImage":3164,"date":3170,"body":3171,"category":299,"tags":3172},[3169],"Emily Kyle","2019-09-13","\nWe’re (almost!) ready – are you? The inaugural GitLab Commit in Brooklyn is just around the corner.  
We wanted to share some details on the event and how to get the most out of it while you’re onsite and after it’s over.\n\nWhen planning this event we made the strategic choice to avoid convention centers and stuffy hotel ballrooms. We like to be a bit more playful (and out of the box) in how we approach our community and events. We decided to model this event after a block party and on Tuesday September 17 we’ll be taking over 8 venues. On your walk from the subway you’ll notice the street has been “GitLabbed” and will be full of signs. Everything – signs, content, staff – will welcome you. If we did it right it will be quirky, fun, innovative, collaborative, and inclusive. A small block in Brooklyn will _be_ GitLab for a day. We can’t wait to share that vision with everyone attending.\n\nStart by checking in at the Williamsburg Hotel, 96 Wythe Ave, Brooklyn, NY 11249\n(between Nassau St. on the G or Bedford Ave. on the L). Don’t forget to grab breakfast treats and coffee.\nOver the course of the day we will also have sessions and activities in and around the following\nvenues: the  [Wythe Hotel](https://wythehotel.com), [Schimanski](https://www.schimanskinyc.com),\n[Brooklyn Bowl](https://www.brooklynbowl.com), [Kinfolk 90](https://kinfolklife.com/locations/kinfolk-90/) & [Kinfolk 94](https://kinfolklife.com/locations/kinfolk-94/). Each venue serves a specific function and has its own personality but each one flows seamlessly into the next one.\n\n![Map of GitLab Commit](https://about.gitlab.com/images/blogimages/gitlabcommitmap.png){: .shadow.small.center.wrap-text}\n\nIt’s a neighborhood takeover!\n{: .note.text-center}\n\n## Remember to schedule\n\nYou can find the schedule [here](https://gitlabcommit2019brooklyn.sched.com). To get the most out of your day on site, we suggest building out your schedule in the sched link just mentioned so you can reserve your slot in each of the tracks. 
There will be 3 tracks – cloud native, DevOps in action, and powered by GitLab – and each will be color coded to help you navigate throughout the day.\n\n## And it’s not over yet…\n\nWe will close out the day of sessions at the historic Brooklyn Bowl directly following the day’s packed lineup for networking, food & beverages and of course bowling. The party kicks off at 5 pm.\n\n## Other important details\n\nLooking for a well brewed cappuccino or latte?  Kinfolk 90 will be serving Commit attendees with a badge from 12pm-5pm at no charge, so drop in between sessions.\n\nHave questions about our product offerings, a nagging support item, want deeper insight into our security offerings, or time to visit with some of our sponsors?  The Library at the Williamsburg Hotel will be open all day for some one-to-one interaction.\n\nLastly we have a [few spots still open](https://about.gitlab.com/events/commit/) if you want to get in on this action packed day of learning! There is also still time to sign up for our [London event](/events/commit/#) in October.\n",[267,1515,277,9,3173],"frontend",{"slug":3175,"featured":6,"template":686},"get-ready-for-commit","content:en-us:blog:get-ready-for-commit.yml","Get Ready For Commit","en-us/blog/get-ready-for-commit.yml","en-us/blog/get-ready-for-commit",{"_path":3181,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3182,"content":3188,"config":3194,"_id":3196,"_type":14,"title":3197,"_source":16,"_file":3198,"_stem":3199,"_extension":19},"/en-us/blog/get-started-ci-pipeline-templates",{"title":3183,"description":3184,"ogTitle":3183,"ogDescription":3184,"noIndex":6,"ogImage":3185,"ogUrl":3186,"ogSiteName":670,"ogType":671,"canonicalUrls":3186,"schema":3187},"How to use GitLab’s CI/CD pipeline templates","Learn how pipeline templates and Auto DevOps can get you up and running on GitLab 
CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667139/Blog/Hero%20Images/CI-pipeline-templates.jpg","https://about.gitlab.com/blog/get-started-ci-pipeline-templates","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab’s CI/CD pipeline templates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-09-23\",\n      }",{"title":3183,"description":3184,"authors":3189,"heroImage":3185,"date":3190,"body":3191,"category":679,"tags":3192},[788],"2020-09-23","\nWriting deployment pipelines from scratch is a real pain in the branch. We want to make the [continuous integration](/topics/ci-cd/) experience more automatic so teams can get up and running quickly with [GitLab CI/CD](/topics/ci-cd/).\n\nAn easy way to get started is with GitLab’s CI/CD pipeline templates. Pipeline templates come in **more than 30** popular programming languages and frameworks. We’ll show you how to use these pipeline templates for your specific needs.\n\nFor an even more automatic continuous integration experience, we also offer [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) that does much of the legwork for you. Auto DevOps runs on pipelines automatically when a [Dockerfile or matching buildpack](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-build) exists, and identifies dependencies automatically.\n\n## What are CI pipeline templates?\n\n[Pipelines](https://docs.gitlab.com/ee/ci/pipelines/) are an integral component of both continuous integration (CI) and [continuous delivery (CD)](/topics/continuous-delivery/), and continuous deployment (the other \"CD\"). A deployment pipeline consists of two things:\n\n*   Jobs, which define _what_ to do. For example, jobs that compile or test code.\n*   Stages, which define _when_ to run the jobs. 
For example, stages that run tests after stages that compile the code.\n\nPipelines consist of one or more stages that run in order and can each contain one or more jobs that run in parallel. These jobs (or scripts) get run by agents, such as a [GitLab Runner](https://docs.gitlab.com/runner/).\n\nAt GitLab, pipelines are defined in a `gitlab-ci.yml` file. [CI/CD templates](https://docs.gitlab.com/ee/ci/examples/#cicd-templates) incorporate your favorite programming language or framework into this YAML file. Instead of building pipelines from scratch, CI/CD templates simplify the process by having parameters already built-in.\n\nYou can choose one of these templates when you create a `gitlab-ci.yml` file in the UI.\n\n![GitLab CI pipeline templates](https://docs.gitlab.com/ee/ci/img/add_file_template_11_10.png)\n\nBecause our CI/CD templates come in more than 30 popular languages, the chances are good that we have the template you need to get started in our [CI template repository](https://gitlab.com/gitlab-org/gitlab/tree/master/lib/gitlab/ci/templates).\n\n## What is Auto DevOps?\n\n[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) is a GitLab-exclusive feature that provides predefined CI/CD configurations that automatically detect, build, test, deploy, and monitor your applications. Rather than just accessing a template, Auto DevOps is a setting within your GitLab instance that is [enabled by default](https://docs.gitlab.com/ee/topics/autodevops/#enabled-by-default).\n\nOur [product vision for Auto DevOps](/direction/delivery/auto_devops/) is that everything is fully connected as part of one great GitLab experience. 
The term Auto DevOps actually comes from the different parts that are automated by Auto DevOps:\n\n*   \"Auto CI\" – Compile and test software based on best practices for the most common languages and frameworks.\n*   \"Auto review\" – Automatic analysis tools like Code Climate.\n*   \"Auto deploy\" – Based on [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) and incremental rollouts on Kubernetes clusters.\n*   \"Auto metrics\" – Collect statistical data from all the previous steps in order to guarantee performances and optimization of the whole process.\n\nAuto DevOps provides great defaults for all the stages and makes use of CI templates. You can [customize Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/customize.html) to meet your needs, and [manage Auto DevOps with GitLab APIs](https://docs.gitlab.com/ee/topics/autodevops/customize.html#extend-auto-devops-with-the-api).\n\nLearn more about Auto DevOps, check out this video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/0Tc0YYBxqi4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Other CI/CD resources\n\nGitLab also provides [CI/CD examples](https://docs.gitlab.com/ee/ci/examples/) so you can learn how to implement GitLab CI/CD for your specific use case. In addition to template files, you can find repositories with sample projects, and step-by-step tutorials for a variety of scenarios, including:\n\n*   [DevOps and Game Dev with GitLab CI/CD](https://docs.gitlab.com/ee/ci/examples/)\n*   [Test and deploy a Ruby application with GitLab CI/CD](https://docs.gitlab.com/ee/ci/examples/)\n*   [How to deploy Maven projects to Artifactory with GitLab CI/CD](https://docs.gitlab.com/ee/ci/examples/)\n*   ... 
And many others\n\nWith CI/CD templates and our Auto DevOps product feature, teams can start reaping the benefits of continuous integration without all of the manual configurations. For teams managing sometimes _hundreds_ of projects, it’s not realistic or doable to start from scratch. And with GitLab, you don’t have to.\n\nCurious about our best-in-class continuous integration? [Try GitLab free for 30 days](/free-trial/).\n\n## Related reads\n\n*   [\"A beginner's guide to continuous integration\"](/blog/a-beginners-guide-to-continuous-integration/)\n\n*   [\"Want a more effective CI/CD pipeline? Try our pro tips\"](/blog/effective-ci-cd-pipelines/)\n\n*   [\"3 CI/CD challenges to consider\"](/blog/modernize-your-ci-cd/)\n\nCover image by [chuttersnap](https://unsplash.com/@chuttersnap?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/laboratory?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[109,3193,9],"UI",{"slug":3195,"featured":6,"template":686},"get-started-ci-pipeline-templates","content:en-us:blog:get-started-ci-pipeline-templates.yml","Get Started Ci Pipeline Templates","en-us/blog/get-started-ci-pipeline-templates.yml","en-us/blog/get-started-ci-pipeline-templates",{"_path":3201,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3202,"content":3208,"config":3213,"_id":3215,"_type":14,"title":3216,"_source":16,"_file":3217,"_stem":3218,"_extension":19},"/en-us/blog/get-started-compliance-as-code",{"title":3203,"description":3204,"ogTitle":3203,"ogDescription":3204,"noIndex":6,"ogImage":3205,"ogUrl":3206,"ogSiteName":670,"ogType":671,"canonicalUrls":3206,"schema":3207},"Why building compliance as code in DevOps will benefit your entire company","Read here on how to integrate compliance as code into your DevOps cycle and why it's important to have in your 
business","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680734/Blog/Hero%20Images/compliance-as-code-header.jpg","https://about.gitlab.com/blog/get-started-compliance-as-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why building compliance as code in DevOps will benefit your entire company\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-08-19\"\n      }",{"title":3203,"description":3204,"authors":3209,"heroImage":3205,"date":3210,"body":3211,"category":679,"tags":3212},[1016],"2019-08-19","\n\nCompliance, both regulatory and self-imposed, is another area where the shift-left\nmovement has taken hold. By building compliance into your workflow with compliance as code methods, your\nteam can save time while producing secure, low-risk code.\n\n## What is compliance as code?\n\nCompliance as code methods ensure that the correct regulatory or company\ncompliance requirements are fulfilled with zero-touch on the path to production.\nIt builds compliance into development and operations.\n\nThe utilization of compliance as code tools enables stakeholders to ensure that production processes are compliant by means of defining how resources must be configured. Such a structure often allows these tools to automatically adjust resources into a compliant state in order to meet these pre-defined compliance requirements.\n\nThis type of minimal-friction compliance is a crucial solution for large\nenterprises – especially those subject to complex regulation (such as enterprises\noperating in healthcare or financial services). 
By building compliance into the\n[DevOps lifecycle](/topics/devops/), you will streamline the workflow and save developers valuable\ntime during review and testing.\n\n## Benefits of compliance as code\n\nAdopting compliance as code brings a number of advantages and new operational capabilities. \n\n- **It’s easier to stay compliant during compliance rule change periods.** When a change happens in regulatory compliance frameworks, awareness and remediation of any issues happen more quickly because teams don’t have to manually overhaul processes or re-train.\n- **More natural alignment between developers and risk assessment teams.** There is more unity between teams when the compliance controls are already defined as code. It’s then possible to embed compliance rules into delivery processes and enable compliant delivery by default. \n- **A lot of time and money saved.** Automation cuts out costly and time-consuming manual work. When automated compliance as code is in place, there’s a reduced risk of costly fines and data breaches. \n- **It’s all scalable.** Adopting compliance as code means adopting consistency across teams and an organization, regardless of size. This consistency prevents ambiguity and bottlenecks in maintaining compliance. \n\n## Challenges of compliance as code\n\nDevOps means experiencing changes often and quickly, and despite the benefits that automated compliance as code brings, it can also be a challenge. It can sometimes be difficult for security to keep up with the speed of change.\n\nAnd sometimes, even automated compliance as code isn’t perfect. It’s important to remember that there’s no cap on how careful you should be when it comes to DevOps compliance. Despite having automation in place, a pair or two of human eyes open to keep watch is still useful – even if it means a possible increase in human error. 
\n\n## How to implement compliance as code\n\nAs [Jim Bird wrote for O’Reilly](https://www.oreilly.com/learning/compliance-as-code),\ncompliance as code policies must be defined up front, and will bring together\nmanagement, compliance, internal audit, PMO, and infosec leaders. This group\nwill work together to define rules and control workflows. Management also needs\nto understand how operational and other risks will be handled throughout the\npipeline.\n\nHow your company establishes compliance as code policies [will depend on how your team is structured](/topics/devops/build-a-devops-team/)\nbut regardless of how your teams interact, transparency is required. To ensure\nthat information is shared and decisions are made collaboratively, consider\nestablishing the following guidelines:\n\n- **Peer reviews**: The first review cycle for larger changes should be manual, to\nensure no changes are made without at least one other person verifying the\nchange. Reviewers can be assigned randomly to ensure the quality of review.\n- **Static application security testing**: [Static\n(or white box) testing](/blog/developer-intro-sast-dast/) should be done for every code change, in addition to\nmanual reviews.\n- **Subject matter expert reviews for high-risk code**: For code that the management team defines as\nhigh-risk (such as security code), changes should be reviewed by a subject matter\nexpert.\n- **Regulated access controls**: Management should keep access in check, both so that\nchanges aren’t made by a single engineer, and so that every change flows through\nthe workflow and can be reviewed by anyone with access to the dashboard.\n\n### Enhance technology with culture\n\nTechnology and processes will only work if your team cultures are aligned with your goal – and culture starts\nat the top. Team leaders should promote and exemplify a security-first\nmentality and openness to collaborative change. 
This will be a new way of\nthinking for some, but it will help teams adopt the shift-left trend, ultimately\nsaving everyone time and reducing business risk.\n\n### Compliance and open source\n\nIn 2015, [The Linux Foundation found that more than 60% of companies build products with open source software](https://www.linuxfoundation.org/blog/2015/06/why-companies-that-use-open-source-need-a-compliance-program/), but more\nthan half of those companies don’t have formal procedures in place to ensure their\nsoftware complies with open source licenses and regulations. Companies should\ncreate a free and open source software (FOSS) compliance program not only to\nabide by copyright notices and license obligations, but also to protect company\nIP and third-party source code from disclosure.\n\n## How we do compliance at GitLab\n\nWe [began our formalized compliance program](/blog/choosing-a-compliance-framework/)\ntowards the end of our Series C funding round, which was fairly early compared\nto other businesses of our size. The benefit of starting early was that we were\nable to implement security controls while we were still developing and evolving\nour operating processes, instead of retrofitting security to the business. The\nkey decision in our approach was choosing between independent or aggregate\nsecurity controls: We chose the aggregate route, leveraging [Adobe’s CCF](https://blogs.adobe.com/security/2017/05/open-source-ccf.html),\nrather than implementing industry frameworks individually. This allowed us to\nmitigate overlapping asks to GitLab teams, which enabled an agile and efficient\nprogram standup, and gave the compliance group internal credibility.\n\n## Compliance as code provides benefits across your ecosystem\n\nThere are benefits to everyone from the developer to the third-party auditor when compliance is baked into code from the beginning. 
These benefits include:\n- **Time saved**: Your\nteams will spend less time passing code fixes back and forth.\n- **Compliance transparency**: Management will\nunderstand where and how your software abides by compliance requirements.\n- **Routine reporting streamlines auditing**: Reports throughout the DevOps lifecycle provide documentation and proofs of\nrecord that will help management track and streamline any regulatory audit\nprocedures.\n\n## Common compliance as code tools\n\nGoogle Cloud Platform, Amazon Web Services, and Azure are all cloud services that can be used in compliance as code. And oftentimes, these tools are even more effective when paired with native tools. \n\nThrough proper tool adoption, the three core actions of a compliance strategy can be automated: prevention, detection, and remediation.\n\nCover image by [Hack Capital](https://unsplash.com/@hackcapital?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/code?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[749,771,9,875,1158,683],{"slug":3214,"featured":6,"template":686},"get-started-compliance-as-code","content:en-us:blog:get-started-compliance-as-code.yml","Get Started Compliance As Code","en-us/blog/get-started-compliance-as-code.yml","en-us/blog/get-started-compliance-as-code",{"_path":3220,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3221,"content":3227,"config":3233,"_id":3235,"_type":14,"title":3236,"_source":16,"_file":3237,"_stem":3238,"_extension":19},"/en-us/blog/get-started-with-microservices-architecture",{"title":3222,"description":3223,"ogTitle":3222,"ogDescription":3223,"noIndex":6,"ogImage":3224,"ogUrl":3225,"ogSiteName":670,"ogType":671,"canonicalUrls":3225,"schema":3226},"Get started with microservices architecture","For DevOps teams ready to take the next step, adopting a microservices architecture is a smart choice. 
Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667875/Blog/Hero%20Images/trends-in-version-control-land-microservices-cover.jpg","https://about.gitlab.com/blog/get-started-with-microservices-architecture","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get started with microservices architecture\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-09-20\",\n      }",{"title":3222,"description":3223,"authors":3228,"heroImage":3224,"date":3229,"body":3230,"category":769,"tags":3231},[745],"2022-09-20","A great way to jumpstart a DevOps practice is by adopting a microservices architecture. The [benefits of a microservices architecture](/blog/what-are-the-benefits-of-a-microservices-architecture/) are numerous and include improved scalability, enhanced fault isolation, and the ability to bring new features to market faster.\n\n## How to start building with microservices architecture\n\n### Identify decomposable aspects of the application\n\nOne of the main properties of a microservice is its independence, so identifying the decomposable parts of the application — those parts that can work autonomously — is essential. Getting the service boundaries wrong could result in unwanted changes to other services, so you need to understand the system’s domain.\n\nIn many cases, such breakdown aligns with the business domains and is reflected in development teams.\n\n### Determine the metrics to monitor\n\nWith a microservices application, it’s crucial to monitor the status of each service so it’s possible to react to changing demands in the production environment. Some common metrics to monitor include the CPU and memory usage of each host, the API response time, and the error rate.\n\nWithout monitoring, teams may not catch problems when they arise. 
For example, if a server is overwhelmed by traffic, other services may not respond because they’re trying to communicate with an over-burdened service. \n\nBeing able to visualize these potential issues helps prevent downtime. Therefore, establish metrics early so necessary adjustments can be made as soon as possible.\n\n## Best practices for deploying and managing microservices\n\n### Infrastructure automation\n\nWhen the number of microservices grows, an application can become difficult to manage. Each microservice has its own deployment schedule. \n\nSome features are hidden behind feature flags, some are collecting usage data through A/B testing, and some services might be using Canary deployments as part of a progressive deployment. \n\nAutomated testing is key so teams will have the ability to stop or roll back deployment when necessary.\n\n### Consumer-driven contract tests\n\nWhen other consumers depend on API endpoints in one microservice, it’s good practice to implement consumer-driven contract testing to ensure version compatibility. \n\nTraditionally, developers first create the APIs on the server side and have clients determine which endpoints to call. That means when the signature of an API changes, it can bring down the consumer.\n\nThis can’t happen with consumer-driven contract testing because, before deploying a microservice to production, consumers determine the required contract (API signature) and test to be sure they are still valid.\n\n### Monitor key metrics\n\nOnce key metrics have been determined, they must be constantly monitored and able to respond to any events detected. This can be difficult, but fortunately, there are tools that simplify monitoring and provide comprehensive visualization.\n\n## Microservices architecture and DevOps\n\nBy decomposing a software system into autonomous parts, [microservices architecture](/topics/microservices/) allows companies to apply the single responsibility principle to individual teams. 
It allows them to manage all aspects of a service independently: the team’s technical stack, team composition, deployment strategies, and even release schedules.\n\nMicroservices architecture, alongside continuous delivery, allows businesses to make decisions based on live production data, thereby expediting feedback loops and reducing the time to market.\n\nTo get started with microservices architecture, it’s a good idea to first develop strong intuitions in decomposing a large system and get a good knowledge base of CI/CD practices. Regardless of the architectural style you choose, these skills will be useful.",[9,1158,3232],"google",{"slug":3234,"featured":6,"template":686},"get-started-with-microservices-architecture","content:en-us:blog:get-started-with-microservices-architecture.yml","Get Started With Microservices Architecture","en-us/blog/get-started-with-microservices-architecture.yml","en-us/blog/get-started-with-microservices-architecture",{"_path":3240,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3241,"content":3247,"config":3252,"_id":3254,"_type":14,"title":3255,"_source":16,"_file":3256,"_stem":3257,"_extension":19},"/en-us/blog/github-free-for-teams",{"title":3242,"description":3243,"ogTitle":3242,"ogDescription":3243,"noIndex":6,"ogImage":3244,"ogUrl":3245,"ogSiteName":670,"ogType":671,"canonicalUrls":3245,"schema":3246},"#GitChallenge: Compare GitLab to GitHub and earn swag","Send us a review of GitLab and GitHub and get swag.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681235/Blog/Hero%20Images/hero-blog-gitlab-github.jpg","https://about.gitlab.com/blog/github-free-for-teams","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"#GitChallenge: Compare GitLab to GitHub and earn swag\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2020-04-14\",\n      
}",{"title":3242,"description":3243,"authors":3248,"heroImage":3244,"date":3249,"body":3250,"category":726,"tags":3251},[745],"2020-04-14","\n\nAre you up for a challenge? Compare GitLab and GitHub! If you send us a link to your review on Twitter by tagging @gitlab and #GitChallenge we’ll send you some swag for giving us a try.\n{: .alert .alert-gitlab-purple}\n\nToday, GitHub announced [free private repositories with unlimited collaborators](https://github.blog/2020-04-14-github-is-now-free-for-teams/). This is great news for developers worldwide. GitHub also announced that they are lowering the price of their paid Team product to the same price as [GitLab’s Bronze/Starter](/pricing/premium/) offering: $4 per month per user.\n\nAt GitLab, we’ve offered free private repositories as part of our Core/Free product from the start. We also recently made 18 additional features open source, which will help teams collaborate more effectively in a single product, and we’ve been steadily gaining market share in the version control space, with users switching from BitBucket and GitHub to GitLab.\n\n## What your team loses when you go from GitHub Pro to Free\n\nWhen you go from GitHub Pro to GitHub Free, you lose some features that are already free and available to all users on GitLab and Gitlab.com:\n\n*   Protected branches in private repos\n*   Draft PRs in private repos\n*   GitHub Pages in private repos (using one)\n*   Wikis in private repos\n\n## What your team gains by using GitLab Bronze/Starter\n\nWith GitLab, you get even more features than GitHub Team. 
When there are multiple users on the same team, use [GitLab Bronze](/pricing/#gitlab-com)/[Starter](/pricing/#self-managed):\n\n*   Code owners in private repos\n*   Multiple issue assignees in private repos\n*   Multiple PR assignees in private repos\n*   Code review automatic assignment in private repos\n*   Standard support\n\n## GitLab is more complete\n\nGitLab is a [complete DevOps platform](/topics/devops/), delivered as a single application. Here is a visual comparison:\n\n![Comparing_GitLab_GitHub](https://about.gitlab.com/images/blogimages/gitlab-github-comparison.jpg){: .shadow}\n\n## Take the #GitChallenge\n\nIt has never been a better time to compare DevOps tools and find the best ones for you.\n\nCompare GitLab (get your [free trial here](/free-trial/)) and GitHub! You can:\n* Record a video and post it on social media\n* Write a blog or Medium post\n* Post your review on one of the many review sites like [G2](https://www.g2.com/products/gitlab/reviews)\n\nAfter you finish your review, send us a link on Twitter by tagging @gitlab and #GitChallenge, and we’ll send you some swag for giving us the feedback!\n",[9,1789],{"slug":3253,"featured":6,"template":686},"github-free-for-teams","content:en-us:blog:github-free-for-teams.yml","Github Free For Teams","en-us/blog/github-free-for-teams.yml","en-us/blog/github-free-for-teams",{"_path":3259,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3260,"content":3266,"config":3271,"_id":3273,"_type":14,"title":3274,"_source":16,"_file":3275,"_stem":3276,"_extension":19},"/en-us/blog/github-launch-continuous-integration",{"title":3261,"description":3262,"ogTitle":3261,"ogDescription":3262,"noIndex":6,"ogImage":3263,"ogUrl":3264,"ogSiteName":670,"ogType":671,"canonicalUrls":3264,"schema":3265},"GitHub Actions affirms all-in-one is eating the marketplace model","GitHub announces GitHub Actions, a continuous integration tool, affirming the need for single application for the entire DevOps 
lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678806/Blog/Hero%20Images/single-application.png","https://about.gitlab.com/blog/github-launch-continuous-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitHub Actions affirms all-in-one is eating the marketplace model\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2018-10-16\",\n      }",{"title":3261,"description":3262,"authors":3267,"heroImage":3263,"date":3268,"body":3269,"category":299,"tags":3270},[1609],"2018-10-16","\nGitHub announced the launch of their continuous integration tool, [GitHub Actions](https://blog.github.com/2018-10-16-future-of-software/), entering into competition with 14 of its [continuous integration marketplace vendors](https://github.com/marketplace/category/continuous-integration), including Travis CI, CircleCI, and CodeShip. This isn’t the first time we’ve seen GitHub compete against a popular area of its marketplace; they also competed against marketplace vendors in the [project management](https://github.com/marketplace/category/project-management) (Waffle.io vs. issue boards) and [dependency scanning](https://github.com/marketplace/category/dependency-management) categories (Snyk).\n\nWhy compete with vendors within their own marketplace? Similar to [Amazon’s private brands](https://www.businessinsider.com/amazon-owns-these-brands-list-2018-7), which compete in categories with well-established leaders on its own platform, all-in-one is eating the marketplace model, and GitHub is ready to eat its own marketplace to stay competitive.\n\nToday’s increasingly complex technology landscape demands a simplified and seamless all-in-one solution – and built-in [continuous integration](/solutions/continuous-integration/) is a logical first step. 
We know this because when we decided to build a [single application for the entire DevOps lifecycle](/why/), integrated pipelines were the critical first step to helping development teams build, test, deploy, and monitor their code. Companies like [Ticketmaster](/blog/continuous-integration-ticketmaster/) and [Paessler AG](/customers/paessler/) have shown us that when teams are working within a seamlessly integrated application experience, cycle times are reduced by as much as 200%, and the speed of pipelines can be reduced from over two hours to within eight minutes.\n\nWhile there will undoubtedly be space for some successful point solutions, we’re seeing a turning point from disparately integrated toolchains to all-in-one solutions in the tech tools landscape.\n\n## Need for speed and simplicity\n\nSoftware development and delivery is getting more complicated, requiring more tools per team and project. The advent of Kubernetes has brought a desire for DevOps and with it an avalanche of highly focused, sharp tools. The proliferation of teams and tools makes toolchain maintenance unmanageable and cumbersome, slowing down cycle times and inhibiting collaboration at a time when speed to market is critical to business success. Chaining together tools comes at too great of a cost. The explosion of microservices has exacerbated the issue. As more development teams embrace cloud native, building and running applications in containers, the number of projects multiplies and changes need to be made frequently. Disparate toolchains were not built to handle this level of integration complexity.\n\nA single application removes this complexity, providing a single setup, datastore, flow, and interface where teams can work collaboratively and concurrently. It enables [Concurrent DevOps](/topics/concurrent-devops/), removing the need for sequential handoffs, allowing cross-functional collaboration at speed. 
Developers, engineers, product managers, and security experts can all work on their piece without slowing each other down, allowing better visibility into work in flight, and the opportunity to shift left contributions from various teams.\n\nEliminating context switching, automated links between environments, code, issues, and epics, real-time updates, and everything in context are just a few reasons the all-in-one model beats out the toolchain. For a complete list, see our [advantages of a single application](/handbook/product/single-application/) page.\n\n## GitLab is a complete DevOps platform, delivered as a single application\n\nWe shipped [GitLab CI/CD](/solutions/continuous-integration/) in 2016, and completed our Master Plan to ship the entire software development lifecycle by the end of 2016. For the past two years, we’ve been continuously improving our single application, and we’re now working on packaging, monitoring, Kubernetes, and even [serverless](/topics/serverless/).\n\nWe’ve made a couple of [acquisitions](/handbook/acquisitions/) to integrate great point-solutions into our single application. 
It’s our prediction that we will see more acquisitions, big and small, across the technology landscape as the demand for an all-in-one solution grows.\n",[726,9,976],{"slug":3272,"featured":6,"template":686},"github-launch-continuous-integration","content:en-us:blog:github-launch-continuous-integration.yml","Github Launch Continuous Integration","en-us/blog/github-launch-continuous-integration.yml","en-us/blog/github-launch-continuous-integration",{"_path":3278,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3279,"content":3285,"config":3292,"_id":3294,"_type":14,"title":3295,"_source":16,"_file":3296,"_stem":3297,"_extension":19},"/en-us/blog/gitlab-14-modern-devops",{"title":3280,"description":3281,"ogTitle":3280,"ogDescription":3281,"noIndex":6,"ogImage":3282,"ogUrl":3283,"ogSiteName":670,"ogType":671,"canonicalUrls":3283,"schema":3284},"Modern DevOps shift in GitLab 14: Speed, trust & visibility","GitLab 14 accelerates modern DevOps, bringing velocity with confidence, built-in security, and visibility into DevOps success.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668091/Blog/Hero%20Images/gitlab-version-14-wide.png","https://about.gitlab.com/blog/gitlab-14-modern-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 14 signals shift to modern DevOps: A DevOps platform with velocity, trust, and visibility\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brian Glanz\"}],\n        \"datePublished\": \"2021-06-22\",\n      }",{"title":3286,"description":3281,"authors":3287,"heroImage":3282,"date":3289,"body":3290,"category":769,"tags":3291},"GitLab 14 signals shift to modern DevOps: A DevOps platform with velocity, trust, and visibility",[3288],"Brian Glanz","2021-06-22","\n\nThe DevOps era began with a big idea – dissolve silos to deliver better software, faster. 
In the transition from classic software paradigms, DIY DevOps toolchains were built with parts that were never designed to work together. That DIY DevOps era left many trapped in new silos, without visibility, and mired in maintenance. Business outcomes suffered as the potential of DevOps was never fully realized.\n\n\n## The next iteration of DevOps\nThere is a better way to build software. [GitLab 14](/gitlab-14/) delivers modern DevOps with a [complete DevOps platform](/topics/devops-platform/), for a streamlined experience that unleashes the power of DevOps. Over the past year, GitLab has shipped advanced DevOps platform capabilities that enable any team to build and deliver software with velocity, trust, and visibility – no matter their size, industry, or location.  \n\nWith enhancements across the software development lifecycle, GitLab has placed strongly in several market reports across a broad range of areas from [Enterprise Agile Planning](/analysts/gartner-eapt21/) and [Application Security Testing](/analysts/gartner-ast21/) to [Continuous Delivery and Release Automation](/analysts/forrester-cdra20/). Tying it all together with a platform approach is a keystone of the next shift in the DevOps movement. GitLab was named a representative vendor in a market overview of [DevOps platforms](/analysts/gartner-vsdp21/).\n\nAs a “new normal” is taking shape after the pandemic, companies worldwide are coming to grips with what it means to work in hybrid and remote environments. A modern DevOps solution needs to meet the emerging demands for a more flexible workplace. GitLab has been a pioneer and champion of remote work for years and was recently [mentioned by Fast Company as a world-changing idea](https://www.fastcompany.com/90624506/world-changing-ideas-awards-2021-general-excellence-finalists-and-honorable-mentions). 
Having unlocked many of the secrets to remote work success, GitLab stepped up to help others out by shipping a [Remote Work Playbook](/company/culture/all-remote/) and a Coursera course on “[How to Manage a Remote Team](https://www.coursera.org/learn/remote-team-management).” Our all-remote know-how and experience went into the development of GitLab 14 to build capabilities that work wherever you do. \n\n\n## Velocity with confidence\nGitLab 14 enables you to increase development velocity and stay confident with a consistent and efficient developer and operator experience, yielding a more predictable DevOps lifecycle. By using one platform for source code management, continuous integration (CI), continuous delivery (CD), infrastructure as code, security, and beyond, teams are more efficient, collaborative, and productive. Our [2021 Global DevSecOps Survey](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/) shows engineers are happier when they can focus on innovation and adding value than when maintaining integrations – and happy developers attract and retain talent.\n\nOrganizations with a mature DevOps culture know the value of managing configuration as code, IT infrastructure as code, and more, with the same platform and best practices used for application development. In GitLab 14, our [Pipeline Editor](/releases/2021/01/22/gitlab-13-8-released/#the-new-pipeline-editor-makes-cicd-easy-to-use) lowers the barrier to entry for CI/CD while also accelerating power users, with visual authoring and versioning, continuous validation, and pipeline visualization. GitLab 14’s [Kubernetes Agent](/blog/gitlab-kubernetes-agent-on-gitlab-com/) enables secure deployment to your cloud-native infrastructure. 
GitLab 14 also meets customers where they are by supporting GitOps with agent-based and agentless approaches and allows for deployments anywhere, regardless of whether infrastructure is cloud-native.\n\n\n## Visibility into DevOps success\nThe [DevOps Research and Assessment (DORA)](https://www.devops-research.com/research.html) firm’s industry-defining research shows how focused improvement of software delivery performance leads to positive business outcomes like happier customers, greater market share, and increased revenue. Focusing efforts requires measuring four metrics in particular that are highly correlated with business performance. These are deployment frequency, lead time for changes, time to restore service, and change failure rate. \n\nAs a complete DevOps platform, GitLab 14 is uniquely capable of delivering visibility into DevOps with out of the box measurement and visualization of operational metrics, including DORA metrics, that have come to define DevOps maturity. With that visibility comes confidence in the ability to drive both team performance and competitive advantage. \n\nGitLab 14 also takes the key next step toward actionability, with an array of customizable Value Stream Analytics to optimize workflows. Constituent analytics like mean time to merge can uncover bottlenecks such as dysfunction in code review, allowing management to identify the root causes of slowdowns in the DevOps lifecycle, and enabling IT leaders to align with business priorities.\n\n\n## Built-in security\nSecurity without sacrifice – the promise of [DevSecOps](/topics/devsecops/) – is realized with built-in security for platform-driven alignment that decreases exposure, while keeping projects on-time and on-budget. In a world where security is everyone’s responsibility, automating processes and policies gives developers and security pros the information they need to meet this responsibility.  
\n\nEnforcing security on every commit is a matter of course in GitLab 14’s CI/CD, providing real-time feedback as development is happening. A Semgrep analyzer for application security testing offers access to a global rule registry and customization for policy requirements. Acquisitions of Fuzzit and Peach Tech, and GitLab’s new proprietary browser-based DAST crawler, test modern APIs and Single Page Applications (SPAs) demonstrating innovation to meet requirements of modern DevOps. New vulnerability management capabilities increase visibility, providing the controls and observability needed to protect the software factory and its deliverables.\n\n\n## Everyone can contribute\nGitLab 14 has been built by the company and the community together to advance global adoption of modern DevOps. \n\nThanks to GitLab’s open core model, more than 10,000 merge requests from the wider community have been merged into the product since January 2016. The wider community contributes alongside more than 1,300 GitLab team members, all working remotely from 68 countries. 
GitLab believes in a world where everyone can contribute.\n\nGitLab has more than 30 million estimated registered users, from startups to global enterprises, including Ticketmaster, Jaguar Land Rover, Nasdaq, Dish Network, Comcast, and [more who have shared their stories](/customers/), and who trust GitLab to deliver great software, faster.\n",[9,728],{"slug":3293,"featured":6,"template":686},"gitlab-14-modern-devops","content:en-us:blog:gitlab-14-modern-devops.yml","Gitlab 14 Modern Devops","en-us/blog/gitlab-14-modern-devops.yml","en-us/blog/gitlab-14-modern-devops",{"_path":3299,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3300,"content":3305,"config":3310,"_id":3312,"_type":14,"title":3313,"_source":16,"_file":3314,"_stem":3315,"_extension":19},"/en-us/blog/gitlab-15-the-retrospective",{"title":3301,"description":3302,"ogTitle":3301,"ogDescription":3302,"noIndex":6,"ogImage":1449,"ogUrl":3303,"ogSiteName":670,"ogType":671,"canonicalUrls":3303,"schema":3304},"GitLab 15: The retrospective","GitLab was founded in 2011 but that was a world nearly unrecognizable today. Here's a look back at what life was like then.","https://about.gitlab.com/blog/gitlab-15-the-retrospective","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 15: The retrospective\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-06-13\",\n      }",{"title":3301,"description":3302,"authors":3306,"heroImage":1449,"date":3307,"body":3308,"category":769,"tags":3309},[766],"2022-06-13","\nNo cloud native, no containers, and no remote work: Those were just a few of the things _missing_ from the technology landscape in 2011 when we launched GitLab 1.0. It’s been a journey, for sure. 
Here’s a look back at how far we’ve traveled to get to GitLab 15.\n\n## It started with source code management\n\nIn the beginning of GitLab there was source code management (SCM)... and that was it. Continuous integration (CI) became part of GitLab because our co-founder Dmitriy Zaporozhets got tired of having to keep the CI servers running separately, so we decided to bring continuous integration into the mix. Even then we knew it didn’t make sense for companies to “DIY” critical parts of their process. That being said, it did feel counterintuitive to bring SCM and CI together, but we tried it anyway. Continuous delivery (CD) eventually evolved out of the CI/SCM integration, but it is crazy to think that when we started GitLab, CI/CD was not really a consideration.\n\n## DIY DevOps really did exist\n\nWhat people were talking about, though, was DevOps, and specifically DIY DevOps because back then it was completely normal for teams to assemble a bunch of tools and call it done. When we would talk about the importance of fewer tools and more integration, people would turn up their noses. We heard a lot of “different tools for different things” and “many have sharp tools.” Today we know that a DevOps platform increases development speed and  release cadences. But back then, gluing together tools was seen as normal.\n\n## What’s old is new again\n\nBack in the day there were lots of tools and also very different programming languages than we reach for today. In the 2014 era, developers often wrote code in Ruby or JavaScript, and kept things layers away from the microprocessor. Over the years, that’s changed drastically. [Rust](/blog/rust-programming-language/) and Go – as just two examples – have brought us back to the processor and reflect today’s modern programming styles. 
It’s another sign of how drastically things have shifted over time.\n\n## It wasn’t cloud-y\n\nThe cloud was in its infancy when GitLab started and at the time we all thought it was probably a great solution for startups or small businesses, but perhaps not something that would ever be in widespread use. Fast-forward to today where most companies run their infrastructures in the cloud. Now it’s widely accepted a cloud native architecture helps teams deliver better software faster and cloud skepticism has drifted away.\n\n## Security was siloed\n\nSecurity teams, and tools, were completely separate entities when GitLab began and that, of course, made doing something inherently difficult even more so. Devs were asked to fix bugs without any context, process, or knowledge of deployment status, and naturally weren’t very excited about it all. Realizing this, we began slowly adding scans to our CI/CD steps so that security was part of the pipeline and not separate from it. The goal is to let developers and teams deal with security in an incremental way, rather than a large to-do list at the end of the process. And that [progress is ongoing](/blog/one-devops-platform-can-help-you-achieve-devsecops/). \n\n## Code review wasn’t integrated\n\nEleven years ago, code review wasn’t that different from security, i.e., it was something done in a distant time and place and without context. Today, merge requests are the hub of all the reviews, including code, security, and compliance, and the concept of “review” is firmly  embedded in the process. Code review itself is now getting a boost from machine learning (ML) with “suggested reviewer,” [a feature we’ve added in beta](/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review/) at the time of this writing but will be coming to all of GitLab at some point during the 15.x releases. \n\n> You’re invited! 
Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n## Open source\n\nIt’s fair to say the open source community is stronger and more visible today than it was 11 years ago. GitLab came from the open source community and we continue to proudly define our company and product as open source. Through the years, we’ve tried to keep the open source enthusiasm going by creating an environment where [customers can and do contribute regularly](/blog/how-you-contribute-to-gitlabs-open-devops-platform/) to our product. We want to continue to preserve GitLab as an open source project as well as our community and the company that sustains it all.\n\n## It’s remotely possible \n\nAnd we can’t have a comprehensive retrospective without looking at the concept of remote work. It was practically unheard of in 2011 and, though it’s been normalized today, we spent a long time taking this journey alone. 
So today’s reality – that [successful asynchronous work](/blog/five-ways-to-scale-remote-work/) means having a platform to enable it – is especially satisfying for us.\n",[9,109,875],{"slug":3311,"featured":6,"template":686},"gitlab-15-the-retrospective","content:en-us:blog:gitlab-15-the-retrospective.yml","Gitlab 15 The Retrospective","en-us/blog/gitlab-15-the-retrospective.yml","en-us/blog/gitlab-15-the-retrospective",{"_path":3317,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3318,"content":3324,"config":3331,"_id":3333,"_type":14,"title":3334,"_source":16,"_file":3335,"_stem":3336,"_extension":19},"/en-us/blog/gitlab-achieves-aws-devops-competency-certification",{"title":3319,"description":3320,"ogTitle":3319,"ogDescription":3320,"noIndex":6,"ogImage":3321,"ogUrl":3322,"ogSiteName":670,"ogType":671,"canonicalUrls":3322,"schema":3323},"GitLab achieves AWS DevOps Competency certification","GitLab has been certified with AWS DevOps Competency, affirming our further commitment as a technology partner with Amazon Web Services.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666959/Blog/Hero%20Images/gitlab-aws-cover.png","https://about.gitlab.com/blog/gitlab-achieves-aws-devops-competency-certification","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab achieves AWS DevOps Competency certification\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tina Sturgis\"},{\"@type\":\"Person\",\"name\":\"Eliran Mesika\"}],\n        \"datePublished\": \"2018-11-28\",\n      }",{"title":3319,"description":3320,"authors":3325,"heroImage":3321,"date":3328,"body":3329,"category":299,"tags":3330},[3326,3327],"Tina Sturgis","Eliran Mesika","2018-11-28","\n\nToday, we are proud to announce GitLab has been certified with [AWS DevOps Competency](https://aws.amazon.com/devops/partner-solutions/), affirming our further commitment as a technology partner with Amazon Web 
Services (AWS).\n\nBuilding on the foundation of our AWS partnership over the last three years, with this DevOps certification we’ve now received the highest level of accreditation available from AWS. We bring proven customer success with measurable return on investment for customers running GitLab on AWS and [using GitLab to deploy their software to AWS](/partners/technology-partners/aws/).\n\n![AWS DevOps Competency badge](https://about.gitlab.com/images/blogimages/DevOps_competency_badge.png){: .small.right.wrap-text}\n\n### Why the AWS DevOps Competency matters\n\nAchieving this certification sets GitLab apart as an AWS Partner Network (APN) member that provides demonstrated DevOps technical proficiency and proven customer success, with specific focus in the [Continuous Integration](/solutions/continuous-integration/) and [Continuous Delivery](/solutions/continuous-integration/) category.\n\nThis is important for our own customers who are either looking to move to AWS or are already using it, as well as for current AWS customers. Potential users of GitLab with AWS can be assured that the GitLab solution has been reviewed and approved by an AWS Architect Review Board and that it meets [AWS Security Best Practices](https://d0.awsstatic.com/whitepapers/Security/AWS_Security_Best_Practices.pdf).\n\nThrough this process we were able to demonstrate our product is production ready on AWS for DevOps, specifically for improving application delivery, application build/test, or infrastructure/configuration management.\n\n### GitLab and AWS customer success\n\nTo learn more about the GitLab customer case studies considered for this competency, please review both the Axway and [Trek10](/customers/trek10/) case studies. 
You can also access information about other customers on the [GitLab customers page](/customers/).\n\n### More about the AWS Competency Program\n\nAWS established the program to help customers identify, through the AWS Partner Network, partners with deep industry experience and expertise in specialized solution areas. Attaining an AWS Competency allows partners to differentiate themselves to customers by showcasing expertise in a specific solution area.\n\nWe are honored to obtain this AWS DevOps Competency status, and believe this helps advance [our mission to allow everyone to contribute](/company/mission/#mission). Our definition of everyone now extends further, to those who are small and large users of AWS and AWS Services on their DevOps journey.   \n\nFor more information on GitLab’s partnership with AWS, check out [about.gitlab.com/solutions/aws](/partners/technology-partners/aws/).\n\nTo learn more about GitLab’s Technology Partners, visit [about.gitlab.com/partners](/partners/technology-partners/).\n",[976,9,726],{"slug":3332,"featured":6,"template":686},"gitlab-achieves-aws-devops-competency-certification","content:en-us:blog:gitlab-achieves-aws-devops-competency-certification.yml","Gitlab Achieves Aws Devops Competency Certification","en-us/blog/gitlab-achieves-aws-devops-competency-certification.yml","en-us/blog/gitlab-achieves-aws-devops-competency-certification",{"_path":3338,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3339,"content":3345,"config":3351,"_id":3353,"_type":14,"title":3354,"_source":16,"_file":3355,"_stem":3356,"_extension":19},"/en-us/blog/gitlab-acquisitions",{"title":3340,"description":3341,"ogTitle":3340,"ogDescription":3341,"noIndex":6,"ogImage":3342,"ogUrl":3343,"ogSiteName":670,"ogType":671,"canonicalUrls":3343,"schema":3344},"A guide to GitLab’s soft landing acquisitions","Find the team a new home, release your technology to a wider user base, and continue to build products you love through a soft-landing 
acquisition.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680671/Blog/Hero%20Images/soft-landing-acquisitions.jpg","https://about.gitlab.com/blog/gitlab-acquisitions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A guide to GitLab’s soft landing acquisitions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eliran Mesika\"}],\n        \"datePublished\": \"2019-07-30\",\n      }",{"title":3340,"description":3341,"authors":3346,"heroImage":3342,"date":3347,"body":3348,"category":299,"tags":3349},[3327],"2019-07-30","\n\nA few months ago we published our [acquisitions handbook](/handbook/acquisitions/). A first of its kind, it provides a clear view on how we approach and carry out acquisitions at GitLab. We believe this handbook is the basis for effective communication and expectation setting.\n\nOur unique approach to acquisition is suited for companies which have built great technologies but were unable to reach the desired distribution and are nearing the end of their runway. For companies in this state we are offering an opportunity for soft landing in GitLab through acquisition, finding the team a new home, releasing the technology you developed to the greater GitLab user base, and continuing to build awesome products you love.\n\n## Is this relevant for your company?\n\nIf you’re a technology company:\n1. Operating in the expanded [DevOps space](/direction/)\n2. With a team of 10 employees or fewer\n3. At the end of your runway and/or thinking about winding down\n4. Open to a soft-landing acquisition and ready to move through the process quickly\n\n... then your company is potentially a great fit for our soft-landing acquisition process.\n\n## What GitLab has to offer\n\n1. Assets will be purchased for up to $1M total, all cash. GitLab stock will not be offered as part of the deal for the assets sold.\n2. 
We believe talent follows leadership they trust. In addition to the purchase price, GitLab will offer cash bonuses for founders and engineers to help in the transition, conditional on employee interviews and offer acceptance:\n   - Each founder with more than 10% ownership of the company will receive $250,000 paid as follows: $50,000 on closing and $200,000 as a retention bonus\n   - Each engineer will receive $60,000 paid as follows: $12,000 on closing and $48,000 as a retention bonus\n1. Triple our normal stock option grants for founders, normal stock option grants for non-founders\n\nWe invite you to take a closer look at our acquisitions [handbook page](/handbook/acquisitions/) and reach out to myself, the acquisitions lead,  eliran@gitlab.com, to start a discussion.\n\nIt's important to add that we're open to other types of acquisitions, aside from the soft-landing type. We've felt it's beneficiary to all sides of a soft-landing acquisition to have a streamlined, fast process, which is why we've created ours at GitLab. 
If you'd like to engage us in an acquisition conversation, again, feel free to reach out to me at eliran@gitlab.com.\n\nYou can also read about [one startup's experience of being acquired by GitLab](/blog/gemnasium-our-gitlab-journey/).\n\nCover image by [Pascal Meier](https://unsplash.com/photos/UYiesSO4FiM) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,936,3350],"remote work",{"slug":3352,"featured":6,"template":686},"gitlab-acquisitions","content:en-us:blog:gitlab-acquisitions.yml","Gitlab Acquisitions","en-us/blog/gitlab-acquisitions.yml","en-us/blog/gitlab-acquisitions",{"_path":3358,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3359,"content":3365,"config":3372,"_id":3374,"_type":14,"title":3375,"_source":16,"_file":3376,"_stem":3377,"_extension":19},"/en-us/blog/gitlab-adds-further-measures-to-combat-credential-stuffing-and-other-types-of-platform-abuse",{"title":3360,"description":3361,"ogTitle":3360,"ogDescription":3361,"noIndex":6,"ogImage":3362,"ogUrl":3363,"ogSiteName":670,"ogType":671,"canonicalUrls":3363,"schema":3364},"How GitLab is fighting credential stuffing and platform abuse","Integration of fraud detection and prevention tool into authentication flow increases risk reduction.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671606/Blog/Hero%20Images/workflow-tips-security-quality-cover.jpg","https://about.gitlab.com/blog/gitlab-adds-further-measures-to-combat-credential-stuffing-and-other-types-of-platform-abuse","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab adds further measures to combat credential stuffing and other types of platform abuse\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Monmayuri Ray\"}],\n        \"datePublished\": \"2022-08-19\",\n      }",{"title":3366,"description":3361,"authors":3367,"heroImage":3362,"date":3369,"body":3370,"category":875,"tags":3371},"GitLab adds further measures to 
combat credential stuffing and other types of platform abuse",[3368],"Monmayuri Ray","2022-08-19","\n\nWith an observed increase in credential stuffing attacks, we at GitLab send periodic reminders to users to [enable multifactor authentication](https://docs.gitlab.com/ee/user/profile/account/two_factor_authentication.html), which helps to reduce this type of attack but does not entirely eliminate it. Since MFA is a choice per user discretion, we have some users who have not enabled MFA.\n[Credential stuffing attacks](https://owasp.org/www-community/attacks/Credential_stuffing) are particularly threatening because they are a popular method by which scammers take over users’ accounts, at scale.\n \nTo further reduce the threat of credential stuffing attacks on GitLab.com, the anti-abuse team at GitLab implemented additional protections when users authenticate. We contracted with fraud prevention and account security firm Arkose Labs to integrate [Arkose Protect](https://www.arkoselabs.com/arkose-protect/) into the user login flow to validate sessions before allowing successful login. This initiative was prioritized as part of a rapid action process where there was collaboration among various teams, engineers, and Arkose Labs for the implementation to go live on April 29, 2022.\n\nThe rapid action implementation reduces the risk of account takeover for GitLab.com users, while also reducing spam and crypto mining abuse of our users' projects.\n\n## How this risk reduction works\n\nWe look into several checks within the authentication flow, which include change in IP address, user activity, and failed login attempts for Arkose Labs to evaluate the risk of the session. The risk score is based on a multi-classification machine learning model of “high”, “medium”, “low”.\n\nIf the risk is rated low, the user is allowed to proceed to authenticate and has the same experience they had previously. Approximately 10% of the time the risk is higher. 
In that case, the user must complete an enhanced CAPTCHA from Arkose Labs before they are allowed to authenticate. Based on the feedback data, the score system is also adjusted and learns from reported false positives and false negatives.\n\nThe flow:\n\n![the flow](https://about.gitlab.com/images/blogimages/credentialstuffing3.png){: .shadow}\n\nImplementing these security controls reduces the risk of automated password guessing while also reducing automated account registrations that, as mentioned above, are used by some attackers to spam or do crypto mining. The reduction in abuse has been significant: Accounts blocked by automation and manually by our trust and safety team members were reduced by more than 40% as a result of these new features.\n\n## The future\n\nThe anti-abuse team is planning future work to further reduce abuse of our platform while minimizing the impact on legitimate users when they register for an account, authenticate, and use features that are sometimes abused (such as CI jobs being abused to do crypto mining). For example, we plan to have a holistic user scoring engine that can provide a trust score based on every activity. 
\n\nLearn more about [how GitLab works with Arkose Protect](https://docs.gitlab.com/ee/integration/arkose.html).\n\n",[9,728,875],{"slug":3373,"featured":6,"template":686},"gitlab-adds-further-measures-to-combat-credential-stuffing-and-other-types-of-platform-abuse","content:en-us:blog:gitlab-adds-further-measures-to-combat-credential-stuffing-and-other-types-of-platform-abuse.yml","Gitlab Adds Further Measures To Combat Credential Stuffing And Other Types Of Platform Abuse","en-us/blog/gitlab-adds-further-measures-to-combat-credential-stuffing-and-other-types-of-platform-abuse.yml","en-us/blog/gitlab-adds-further-measures-to-combat-credential-stuffing-and-other-types-of-platform-abuse",{"_path":3379,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3380,"content":3386,"config":3392,"_id":3394,"_type":14,"title":3395,"_source":16,"_file":3396,"_stem":3397,"_extension":19},"/en-us/blog/gitlab-and-jira-integration-the-final-steps",{"title":3381,"description":3382,"ogTitle":3381,"ogDescription":3382,"noIndex":6,"ogImage":3383,"ogUrl":3384,"ogSiteName":670,"ogType":671,"canonicalUrls":3384,"schema":3385},"GitLab and Jira integration: the final steps","The last of our three-part series on GitLab and Jira integrations offers a step-by-step look at how the tools work together.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679490/Blog/Hero%20Images/jira-importer-blog-post.png","https://about.gitlab.com/blog/gitlab-and-jira-integration-the-final-steps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Jira integration: the final steps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tye Davis\"}],\n        \"datePublished\": \"2021-05-24\",\n      }",{"title":3381,"description":3382,"authors":3387,"heroImage":3383,"date":3389,"body":3390,"category":791,"tags":3391},[3388],"Tye Davis","2021-05-24","\n_This is the third in our three-part series on 
GitLab and Jira integrations. [Part one](/blog/integrating-gitlab-com-with-atlassian-jira-cloud/) explained how to integrate GitLab.com with Jira Cloud. [Part two](/blog/gitlab-jira-integration-selfmanaged/) walked through a detailed explanation of integrating GitLab self-managed with Jira._\n\nAfter the integration is set up on GitLab and Jira, you can:\n\n* Refer to any Jira issue by its ID in GitLab branch names, commit messages, and merge request titles.\n\n* Using commit messages in GitLab, you have the ability to move Jira issues along that Jira projects defined transitions. Here you can see that this Jira issue has Backlog, Selected for Development, In Progress and Done. \n\n![Issue View in Jira](https://about.gitlab.com/images/blogimages/atlassianjira/issueview.png){: .shadow.medium.center}\nIssue View in Jira\n{: .note.text-center}\n\n* As referenced in the Base GitLab-Jira integration, when you comment in a merge request and commit referencing an issue, e.g., PROJECT-7, will add a comment in Jira issue in the format. In addition, by commenting in a jira transition (putting a “#” first), this will initiate the movement of a Jira Issue to the desired transition. Below is using the built-in GitLab Web IDE (this can be done in your Web IDE of choice as well).\n\n![Comment in a Commit/MR](https://about.gitlab.com/images/blogimages/atlassianjira/commitcomment.png){: .shadow.medium.center}\nComment in a Commit/MR\n{: .note.text-center}\n\n* Currently, the Jira-GitLab Dev Panel integration via DVCS refreshes on a 60-min schedule. 
To expedite, you’ll need to manually refresh the specific project with your most recent changes.\n\n![Dev Panel refreshes every 60 minutes](https://about.gitlab.com/images/blogimages/atlassianjira/devpanelrefresh.png){: .shadow.medium.center}\nDev Panel refreshes every 60 minutes\n{: .note.text-center}\n\n* See the linked branches, commits, and merge requests in Jira issues (merge requests are called “pull requests” in Jira issues).\nJira issue IDs must be formatted in uppercase for the integration to work.\n\n![See GitLab linked in the Dev Panel](https://about.gitlab.com/images/blogimages/atlassianjira/gitlabdevpanel.png){: .shadow.medium.center}\nSee GitLab linked in the Dev Panel\n{: .note.text-center}\n\n* Click the links to see your GitLab repository data.\n\n![Click into the commits](https://about.gitlab.com/images/blogimages/atlassianjira/clickintocommit.png){: .shadow.medium.center}\nClick into the commits\n{: .note.text-center}\n\n![See GitLab linked in the Dev Panel](https://about.gitlab.com/images/blogimages/atlassianjira/clickintopr.png){: .shadow.medium.center}\nClick into the merge (pull) requests\n{: .note.text-center}\n\nFor more information on using Jira Smart Commits to track time against an issue, specify an issue transition, or add a custom comment, see the Atlassian page Using [Smart Commits](https://confluence.atlassian.com/fisheye/using-smart-commits-960155400.html)\n\n## View Jira Issues within GitLab\n\nYou can browse and search issues from a selected Jira project directly in GitLab. 
This requires configuration in GitLab by an administrator.\n\n* In the GitLab integration setup for Jira, click \"enable Jira issues.\"\n\n![Enable Jira issues in GitLab](https://about.gitlab.com/images/blogimages/atlassianjira/enablejiraissues.png){: .shadow.medium.center}\nEnable Jira issues in GitLab\n{: .note.text-center}\n\n* Locate your project key in Jira.\n\n![Locate your project key in Jira](https://about.gitlab.com/images/blogimages/atlassianjira/locateprojectkey.png){: .shadow.medium.center}\nLocate your project key in Jira\n{: .note.text-center}\n\n* Add your proejct key into the GitLab integration setup for Jira.\n\n![Add your proejct key to GitLab](https://about.gitlab.com/images/blogimages/atlassianjira/addprojectkey.png){: .shadow.medium.center}\nAdd your proejct key to GitLab\n{: .note.text-center}\n\n* Select \"Jira Issues\", then \"Issue List\" from the left panel in GitLab\n\n![Select Jira Issues on left panel](https://about.gitlab.com/images/blogimages/atlassianjira/selectjiraissues.png){: .shadow.medium.center}\nSelect Jira Issues\n{: .note.text-center}\n\nFrom the Jira Issues menu, click Issues List. The issue list defaults to sort by Created date, with the newest issues listed at the top. 
You can change this to Last updated.\nIssues are grouped into tabs based on their [Jira status](https://confluence.atlassian.com/adminjiraserver070/defining-status-field-values-749382903.html).\n\n* The Open tab displays all issues with a Jira status in any category other than Done.\n* The Closed tab displays all issues with a Jira status categorized as Done.\n* The All tab displays all issues of any status.\n\nClick an issue title to open its original Jira issue page for full details.\n\n![View Jira issues in GitLab](https://about.gitlab.com/images/blogimages/atlassianjira/viewjiraissues.png){: .shadow.medium.center}\nView Jira issues in GitLab\n{: .note.text-center}\n\n### Search and filter the issues list\n\nTo refine the list of issues, use the search bar to search for any text contained in an issue summary (title) or description.\nYou can also filter by labels, status, reporter, and assignee using URL parameters. Enhancements to be able to use these through the user interface are [planned](https://gitlab.com/groups/gitlab-org/-/epics/3622).\n\n* To filter issues by labels, specify one or more labels as part of the labels[] parameter in the URL. When using multiple labels, only issues that contain all specified labels are listed. /-/integrations/jira/issues?labels[]=backend&labels[]=feature&labels[]=QA\n* To filter issues by status, specify the status parameter in the URL. /-/integrations/jira/issues?status=In Progress\n* To filter issues by reporter, specify a reporter’s Jira display name for the author_username parameter in the URL. /-/integrations/jira/issues?author_username=John Smith\n* To filter issues by assignee, specify their Jira display name for the assignee_username parameter in the URL. 
/-/integrations/jira/issues?assignee_username=John Smith\n\n## Troubleshooting\nIf these features do not work as expected, it is likely due to a problem with the way the integration settings were configured.\n\n### GitLab is unable to comment on a Jira issue\n\nMake sure that the Jira user you set up for the integration has the correct access permission to post comments on a Jira issue and also to transition the issue, if you’d like GitLab to also be able to do so. Jira issue references and update comments will not work if the GitLab issue tracker is disabled.\n\n### GitLab is unable to close a Jira issue\n\nMake sure the Transition ID you set within the Jira settings matches the one your project needs to close an issue.\nMake sure that the Jira issue is not already marked as resolved; that is, the Jira issue resolution field is not set. (It should not be struck through in Jira lists.)\n\n## Conclusion\n \nGitLab helps teams ship software faster with technology integration options, such as the integration with Jira, that automate tasks, provide visibility into development progress and the greater end-to-end software lifecycle. We recognize that many companies use Jira for Agile project management and our seamless integration brings Jira together with GitLab. \n\n## Watch and learn\n\nMore of a video person? 
For a walkthrough of the integration with GitLab for Jira, watch and learn how to configure GitLab Jira Integration using Marketplace App.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/fWvwkx5_00E\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n",[749,231,9],{"slug":3393,"featured":6,"template":686},"gitlab-and-jira-integration-the-final-steps","content:en-us:blog:gitlab-and-jira-integration-the-final-steps.yml","Gitlab And Jira Integration The Final Steps","en-us/blog/gitlab-and-jira-integration-the-final-steps.yml","en-us/blog/gitlab-and-jira-integration-the-final-steps",{"_path":3399,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3400,"content":3406,"config":3414,"_id":3416,"_type":14,"title":3417,"_source":16,"_file":3418,"_stem":3419,"_extension":19},"/en-us/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development",{"title":3401,"description":3402,"ogTitle":3401,"ogDescription":3402,"noIndex":6,"ogImage":3403,"ogUrl":3404,"ogSiteName":670,"ogType":671,"canonicalUrls":3404,"schema":3405},"Oracle and GitLab partner for cloud-native app development","Learn the benefits of deploying the DevOps platform on Oracle Cloud Infrastructure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668514/Blog/Hero%20Images/multi-cloud-future.jpg","https://about.gitlab.com/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Oracle partner for a cloud native approach to modern application development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Creighton Swank\"},{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2022-10-20\",\n      
}",{"title":3407,"description":3402,"authors":3408,"heroImage":3403,"date":3411,"body":3412,"category":769,"tags":3413},"GitLab and Oracle partner for a cloud native approach to modern application development",[3409,3410],"Creighton Swank","Vick Kelkar","2022-10-20","\nModern application development requires a cloud native platform that can operate in and across multiple cloud providers. GitLab has partnered with Oracle to enable customers to run GitLab’s DevOps platform on Oracle Cloud Infrastructure (OCI).\n\nWith OCI, organizations can accelerate migrations of existing enterprise workloads, deliver better reliability and performance for all applications, and offer the complete services customers need to build innovative cloud applications. With GitLab’s DevOps platform and OCI, businesses can create a resilient, high-performance DevOps environment. OCI also supports automatic operating system patching and zero trust architecture, which aligns with GitLab’s focus on [application security](/stages-devops-lifecycle/secure/).\n\n## The benefits of pairing GitLab and OCI\n\nPairing GitLab’s DevOps platform and OCI provides many benefits, including the following:\n\n- performance\n- platform breadth\n- security\n- value\n- hybrid and multi-cloud environments\n- GovCloud regions\n\n### Performance\n\nOCI provides a high-performance, resilient foundation for cloud services. Customers can quickly provision instances that feature the latest-generation processors via API, SDK, command line, Terraform, or the console. Workloads can scale up and/or out based on their requirements and compute-intensive workloads can leverage GPU shapes for hardware acceleration of AI/ML workloads. At the same time, GitLab runners can be configured to [leverage Nvidia GPUs](https://docs.gitlab.com/runner/configuration/gpus.html) for various executors to take advantage of GPUs and AI/ML workloads. 
\n\n### Platform breadth\n\nGitLab’s DevOps platform has the ability to integrate with Kubernetes service like OKE via GitLab Kubernetes agent. Leveraging GitLab’s Kubernetes agent will unlock [GitOps workflow](https://docs.gitlab.com/ee/user/clusters/agent/gitops.html) and [CI/CD workflow](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html) for cloud native development. And the Oracle Cloud Infrastructure also offers a wide variety of platform services that allow customers to run workloads without having to manage infrastructure. Customers can run workloads on compute instances, in containers with Oracle Kubernetes Engine (OKE), or even as serverless functions. Services like object storage and events can be leveraged to build applications without managing infrastructure at all. For a complete list of these services, please click [here](https://docs.oracle.com/en-us/iaas/Content/services.htm). \n\n### Security\n\nThe second generation of OCI has been redesigned from the ground up to be a secure cloud. Oracle designed OCI architecture for security of the platform through isolated network virtualization, highly secure firmware installation, a controlled physical network, and network segmentation. GitLab’s DevOps platform is not only an ODIC provider but the platform integrates with other identity providers to support single sign-on capabilities. The platform’s [permission model](https://docs.gitlab.com/ee/user/permissions.html#instance-wide-user-permissions) follows similar approaches used by OCI around separation of concerns and role-based access to resources. \n\n### Value\n\nMission-critical and revenue-generating applications demand more than just availability from their cloud infrastructure. Mission-critical workloads also require consistent performance and the ability to manage, monitor, and modify resources running in the cloud at any time. OCI offers end-to-end SLAs covering performance, availability, and manageability of services. 
\n\nGitLab’s DevOps platform uses the same code base for the SaaS offering as well as self-managed instances. Having the same code base allows customers to adopt the mission-critical DevOps platform in heavily regulated industries such as financial services and healthcare.\n\n### Support for hybrid and multi-cloud environments\n\nEven though many enterprises are moving workloads to the cloud, the reality is this is a multi-cloud world, and many enterprises still maintain infrastructure locally. Oracle has entered into strategic partnerships designed to make it easier for customers to operate in a hybrid and multi-cloud environment. \n\nOracle has partnered with VMware to create the Oracle Cloud VMware solution that allows customers the ability to use their existing tools and processes to manage a VMware environment in OCI. This allows enterprises to accelerate cloud adoption without having to re-architect their applications.\n\nGitLab’s DevOps platform can be deployed on vSphere infrastructure using the GitLab [omnibus install](https://docs.gitlab.com/omnibus/) method. The platform can be installed on-premises or in the cloud. GitLab can be deployed on VMs and the GitLab runners can extend CI capabilities into other cloud environments and [cloud-native hybrid](https://docs.gitlab.com/ee/administration/reference_architectures/#cloud-native-hybrid) deployments.\n\n### GovCloud regions\n\nOCI can provide government customers with the stringent security standards necessary to protect the federal government's data. Oracle has obtained a P-ATO from the Joint Authorization Board for FedRAMP High in its U.S. Government Cloud regions. Varying levels of DISA authorizations are also available but vary by services. Find an up-to-date list [here](https://www.oracle.com/industries/government/federal/fedramp/). Meanwhile, GitLab is pursuing a FedRAMP moderate certification and working on activities related to FedRAMP-ready designation. 
\n\n## Get started with the GitLab DevOps platform and OCI\nOrganizations looking to run GitLab’s DevOps platform on OCI can leverage the supported [Oracle Linux](/install/) package for the platform install. Alternatively, they can leverage the helm chart or GitLab Operator to deploy to Oracle Kubernetes Engine (OKE), which will provide a [cloud-native hybrid approach](https://docs.gitlab.com/ee/administration/reference_architectures/25k_users.html#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) of the GitLab DevOps platform on OCI.\n\nGitLab’s DevOps platform, delivered as a single application, can run on multiple clouds and has the capability of supporting various official [Linux packages](/install/). Besides Linux packages, GitLab’s platform also supports deployments on Kubernetes using [helm charts](https://docs.gitlab.com/charts/) and Kubernetes [GitLab Operator](https://docs.gitlab.com/operator/). \n\nIf you would like to learn more about the GitLab DevOps platform and OCI, please access the [LiveLabs](https://apexapps.oracle.com/pls/apex/dbpm/r/livelabs/home).\n\n_[Kelkar](https://gitlab.com/vkelkar) is GitLab's Director of Alliances. 
Swank is Distinguished Cloud Architect and Cloud CTO at Oracle._\n",[9,682,1041,282],{"slug":3415,"featured":6,"template":686},"gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development","content:en-us:blog:gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development.yml","Gitlab And Oracle Partner For A Cloud Native Approach To Modern Application Development","en-us/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development.yml","en-us/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development",{"_path":3421,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3422,"content":3427,"config":3432,"_id":3434,"_type":14,"title":3435,"_source":16,"_file":3436,"_stem":3437,"_extension":19},"/en-us/blog/gitlab-and-redhat-automation",{"title":3423,"description":3424,"ogTitle":3423,"ogDescription":3424,"noIndex":6,"ogImage":1861,"ogUrl":3425,"ogSiteName":670,"ogType":671,"canonicalUrls":3425,"schema":3426},"GitLab and Red Hat: Automation to enhance secure software development","How our closer relationship with Red Hat will boost deployment automation.","https://about.gitlab.com/blog/gitlab-and-redhat-automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Red Hat: Automation to enhance secure software development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2020-04-29\",\n      }",{"title":3423,"description":3424,"authors":3428,"heroImage":1861,"date":3429,"body":3430,"category":726,"tags":3431},[3410],"2020-04-29","\n\nWe're working towards a closer relationship with Red Hat and we're excited about the possibilities. We think developers can reduce time spent coding while still increase productivity with technologies from GitLab and Red Hat. 
Here's what you need to know.\n\n### Why GitLab?\n\nGitLab enables both the developers and operations teams to apply [DevOps](/topics/devops/) practices using a single application. Using one tool for the entire application’s lifecycle, i.e. right from development and deployment to operations, allows the organization to achieve operational efficiency and reduce deployment cycle times.\n\nGitLab not only provides source code management ([SCM](/solutions/source-code-management/)) but it also offers CI/CD to make streamlined deployments to a container platform like Red Hat OpenShift while maintaining visibility into the deployment pipelines. Furthermore, with [AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/), the GitLab application also addresses the organization’s security requirements through scanning and dependency mapping for the developed application. The ability to check the license of software being used, before deploying it in a production environment, helps organizations reduce their [compliance risks](/solutions/compliance/).\n\n### Why GitLab with Red Hat?\n\nRed Hat has a number of technologies in its portfolio. At the core is Red Hat Enterprise Linux ([RHEL](https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux)), an enterprise-grade Linux operating system (OS) platform used by many Fortune 500 companies that can be deployed across the hybrid cloud, from bare-metal and virtual servers to private and public cloud environments. RHEL makes it easier for the operations team to manage the upgrades, security patches and life cycles of servers being used to run applications like GitLab. Red Hat also provides the industry’s most comprehensive enterprise Kubernetes platform in Red Hat OpenShift. 
OpenShift is uniquely positioned to run a containerized application on a public or private cloud.\n\nGitLab can accelerate software development and deployment of applications while RHEL can act as the more secure, fully managed OS that can scale with the application. The inclusion of new DevOps tools in Red Hat’s hybrid cloud technologies like [service mesh](https://www.openshift.com/blog/red-hat-openshift-service-mesh-is-now-available-what-you-should-know) empowers developers to iterate faster on a foundation of trusted enterprise Linux.\n\nThe GitLab solution, which includes [CI/CD workflow](/topics/ci-cd/), an AutoDevOps workflow, a container registry, and Kubernetes integration can be deployed on RHEL using [install](/install/) instructions and you can find out more about GitLab SaaS pricing model [here](/pricing/#gitlab-com). You can read our sales [FAQ](/sales/#faq) or contact our [sales team](/sales/) if you have questions about the offering.\n\nGitLab can be deployed on RHEL-based machines to provide organizations with DevOps infrastructure and collaboration tools. Our collaboration with Red Hat doesn't stop as a supported platform for the GitLab Server but Red Hat OpenShift can also be a target for our CI/CD and Auto DevOps workflows. Application container images can be pushed to our registry and used to deploy applications into Red Hat OpenShift.\n\n### What’s Next?\n\nAs GitLab and Red Hat increase their collaboration, we plan to announce the availability of GitLab Runner Operator for OpenShift in the near future. At GitLab, we have an [engineering epic](https://gitlab.com/groups/gitlab-org/-/epics/2068) underway to develop first-class support for OpenShift.\n\nWith the upcoming product integrations with Red Hat, GitLab is striving to increase collaboration in the organization, increase developer velocity and reduce friction between teams, regardless of the deployment models of VMs or containers. 
The overarching goal is to help organizations improve their [DevSecOps](/solutions/security-compliance/) posture while significantly reducing security and compliance risks.\n\n### Resources\n\n- [GitOps:The Future of Infrastructure Automation - A panel discussion with Weaveworks, HashiCorp, Red Hat, and GitLab](https://about.gitlab.com/why/gitops-infrastructure-automation/)\n- [RHEL 8 Install documentation](https://about.gitlab.com/install/#centos-8)\n- [and RHEL 7 Install documentation](https://about.gitlab.com/install/#centos-7)\n- [GitLab on Microsoft Azure](https://docs.gitlab.com/ee/install/azure/)\n- [Try OpenShift](https://www.openshift.com/try)\n",[855,109,1041,9,1477],{"slug":3433,"featured":6,"template":686},"gitlab-and-redhat-automation","content:en-us:blog:gitlab-and-redhat-automation.yml","Gitlab And Redhat Automation","en-us/blog/gitlab-and-redhat-automation.yml","en-us/blog/gitlab-and-redhat-automation",{"_path":3439,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3440,"content":3445,"config":3450,"_id":3452,"_type":14,"title":3453,"_source":16,"_file":3454,"_stem":3455,"_extension":19},"/en-us/blog/gitlab-and-the-three-ways-of-devops",{"title":3441,"description":3442,"ogTitle":3441,"ogDescription":3442,"noIndex":6,"ogImage":1449,"ogUrl":3443,"ogSiteName":670,"ogType":671,"canonicalUrls":3443,"schema":3444},"GitLab and the three ways of DevOps","DevOps isn't just an esoteric philosophy - it actually is a roadmap for faster and safer software releases, if you choose the right tool. 
Here's how to take the principles of DevOps and get the most out of the One DevOps Platform.","https://about.gitlab.com/blog/gitlab-and-the-three-ways-of-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and the three ways of DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vlad Budica\"}],\n        \"datePublished\": \"2022-06-15\",\n      }",{"title":3441,"description":3442,"authors":3446,"heroImage":1449,"date":3447,"body":3448,"category":769,"tags":3449},[2703],"2022-06-15","\n\nMost of my daily conversations are focused on features and very deep technical concepts, which provide valuable and actionable insight. However, we miss the fact that tools and technology are leveraged to solve business challenges. When talking about features and technology, it's very easy to see the possible financial gain when replacing different tools with a unified platform. But it's missing all the improvement opportunities that will provide value at all the levels of a company from developers to executives.\n\nThe reality is that we're working in very complex systems, making it hard to see the forest from the trees. As an engineer, you're focused on solving the next immediate problem that arises without taking a step back to reevaluate the system itself. In some cases, the problem itself is created by the design of our software development lifecycle (SDLC). As an executive, it's difficult to balance the effort required to address the technical challenges with the pressure that comes from the business in this ever-increasing rhythm of change.\n\nMy goal with this article is to provide a high-level map that contains the most important DevOps principles and a shortcut. 
I know this is a bold statement as there is a lot of literature on this topic but my approach will be different.\n \nFirst, I'm going to use the [Three Ways](https://itrevolution.com/the-three-ways-principles-underpinning-devops/) as coined in [The DevOps Handbook](https://www.amazon.com/DevOps-Handbook-World-Class-Reliability-Organizations/dp/1942788002) because those are the three foundational principles of DevOps as they were refined from Lean, the Toyota Production System, Theory of Constraints, Six Sigma, and System Thinking principles. Second, I'll reference GitLab as the tool of choice because I think a good tool lets you focus on the work at hand, and GitLab does just that.  \n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\nHere is a short description of what the Three Ways are, what they're about, and why you should care.\n\n## First Way: Maximize flow\n\nThe First Way is all about making work/value flow better through the whole value stream (left to right), and to do that, we need to have a systems thinking approach and always look at the end-to-end result. In the case of IT, this means we optimize for speed from the moment we had the idea, to generating value with software running in production.\n\nWe need to have a good understanding of the system to find potential bottlenecks and areas of improvement. Our improvements should always lead to better overall performance, be aware of the cases in which local enhancements lead to global degradation, and avoid that.\n\nIn this process, it is crucial to stop defects from passing downstream from one workflow stage to another. Why? 
Because defects generate waste (of time and resources).\n\n## Second Way: Feedback loops\n\nThe Second Way deals with feedback loops, amplifying and shortening feedback loops so that we get valuable insight into the work we're doing. The feedback can be related to the code that's written or the improvement initiatives. Feedback loops maximize flow from right to left of the value stream.\n\nQuick, strong feedback loops help build quality into the product and ensure that we're not passing defects downstream. The quicker we do this the quicker and cheaper we can solve them, continuously keeping our software in a deployable state. It's easier for a developer to fix a bug when they are working on that change, and the code and the thought process are fresh in their mind. Suppose days or even weeks pass between the moment of the commit and the moment we realize there is a problem with the change. It will be significantly harder to address the problem, not to mention that we probably realized the problem only when trying to deploy the software and we have a service that's not working on our hands. On the flip side, feedback loops enable learning and experimentation, a point on which I’ll return a bit later.\n\nUsually, more developers lead to more productivity but, as presented in [The State of DevOps Report](https://cloud.google.com/blog/products/devops-sre/announcing-dora-2021-accelerate-state-of-devops-report), this is true only for high performers. Why? If we have a team of 50 developers and problems aren't immediately detected, technical debt builds up. Things will only get worse when we have 100 developers because they will generate even more technical debt with every development cycle. 
A natural tendency would be to add more developers in the hope velocity will get better, but it will degrade, so we add even more developers, and things degrade even more, and deployment frequency starts to suffer as it takes a lot of time to fix all the problems that came from upstream in order to get to a deployable state.\n\n## Third Way: Continuous experimentation and learning\n\nThe Third Way is about creating a culture of trust where continuous experimentation and learning can thrive. This leverages the first two ways in order to be successful.\n\nMaking work flow easily through the value stream enables us to experiment and even take some risks, while failing fast and inexpensively. Feedback loops act as the guardrails that help us keep the risk in check but also facilitate learning because learning happens only when strong fast feedback is available. We can have a scientific approach, experiment with things, and extract the learning and improvement that results from these experiments and their feedback.\n\nThis is an iterative process that will lead to mastery (through increased repetition). This should be coupled with an environment where this local learning becomes global and is integrated into the daily work of all the teams. For this approach to work and start getting some results, 20% of our time should be reserved for these improvement activities. I'm aware how difficult it can be to carve 20% of your time for improvement initiatives when dealing with urgent problems is your full-time job. 
Protecting this improvement time helps us pay our technical debt and make sure things are not spiraling out of control.\n\n## GitLab and the Three Ways\n\t\nNow that we presented the Three Ways of DevOps, maximizing flow (left to right), feedback loops (maximizing flow right to left) and having a continuous learning process, implementing them requires some effort from a tooling and process perspective.\n\nIt’s time to introduce GitLab into the picture, the only DevOps platform that covers the whole SDLC. Why is this useful for you? Because there is a synergy that happens when all the capabilities you need are provided in the same platform, the result is more than the sum of the components. Additionally, a good tool lets you focus on your work, not on the tool itself, so you can spend more time and effort driving your DevOps transformation. The fact that you’ll spend less money and time integrating different tools is the first immediate return of your investment.\n\nWhen the goal is to maximize flow from left to right, GitLab can facilitate that, starting from idea to production. Having the benefit of being a platform built from the ground up, work can flow from Planning to the commit and source code management stage and forward to CI/CD seamlessly. Any person involved in the SDLC can perform their work from the same UI. All the information they need is available without a need to switch through different UIs while paying the mental context-switching cost associated when using disparate tooling. \n\nGitLab provides different control mechanisms to make sure that if defects are introduced they are isolated and they don’t move downstream. Working in short-lived feature branches, different controls around merging and MR Request Approval rules act as gates. 
\n\nBy having everything on the same platform it’s easier to understand the whole flow of work, coupling this with our Value Stream Metrics enables everyone involved to get a better understanding of the overall system and find potential bottlenecks and improvement opportunities.\n\n### Improved flow\n\nAs mentioned, flow in one direction - left to right - is not enough to deliver better software products faster. Feedback loops that are quick and provide strong feedback are crucial for great business outcomes. From a developer perspective, the results of the CI pipeline provides immediate feedback about your change. If this pipeline contains security scans it’s even better. Providing feedback from a security standpoint ensures that we’re not deploying vulnerable code and it gives the developer the opportunity to go back and fix it immediately. This is very actionable feedback that also provides a learning opportunity because the security reports come with information about the vulnerabilities, and also where possible, a potential solution to the vulnerability. All this is available for you without any additional work to integrate different tools.\n\nSwitching perspectives, someone that needs to review or approve a code change has everything they need at their fingertips in one place. It’s straightforward to pull in or “@mention” other necessary parties and they’ll get access to all necessary context. A decision can be made immediately and it’s based on accurate and clear feedback that you can trace back to the initial idea. \n\n### Metrics matter\n\nTaking another step back, we get different metrics (Value Stream, Contribution) at the project level. This is one of the advantages that comes with a platform approach, and these insights are very easy to obtain and feed back into the process. 
When doing software development at scale, more senior managers need this feedback at an even higher level, and, therefore, these are available across multiple teams, projects, or departments. All this information is very valuable from a current perspective, but also it helps guide and shape business decisions. If the velocity isn’t what is needed by the business we can look to remove bottlenecks, improve things or invest in some key areas.\n\nWith these two capabilities in place, we have a framework in which we can iterate quickly and safely. Experimentation becomes easy and very safe, we can test different business hypotheses, and see which ones work best with our customers. This should happen on an ongoing basis because this is the cornerstone of innovation.\n\n### Context is critical \n\nEvery experiment that we perform, every problem that we solve becomes valuable learning that should be accessible to everyone in the organization. Having everything (context, actions, results, learning) in one place enables us to open things up so that everyone can contribute. This requires an environment of trust where everyone feels comfortable to run small experiments that lead to improvements, and where these improvements can diffuse in your entire organization. By having a tool that just works and provides everything you need without any additional work, you gain back capacity that you can use to improve your product, overall system, or organization.\n\nIt’s been a long journey up to this point, with the purpose of taking a look beyond immediate feature comparisons and the immediate financial gain that is realized when replacing multiple tools with one. We looked at the core principles of DevOps as a map in your DevOps transformation and at GitLab as a tool to facilitate that. 
Improving very complex systems is hard, driving that change through your company is a challenge, knowing that you have a tool that just delivers on your needs you can focus on developing code and on your continuous improvement efforts.\n\nI hope this is useful to everyone involved in the SDLC, from the engineers who need to work with and within the system everyday, to senior leaders who need to deliver business results.\n",[9,267,1515],{"slug":3451,"featured":6,"template":686},"gitlab-and-the-three-ways-of-devops","content:en-us:blog:gitlab-and-the-three-ways-of-devops.yml","Gitlab And The Three Ways Of Devops","en-us/blog/gitlab-and-the-three-ways-of-devops.yml","en-us/blog/gitlab-and-the-three-ways-of-devops",{"_path":3457,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3458,"content":3464,"config":3469,"_id":3471,"_type":14,"title":3472,"_source":16,"_file":3473,"_stem":3474,"_extension":19},"/en-us/blog/gitlab-apis-ci",{"title":3459,"description":3460,"ogTitle":3459,"ogDescription":3460,"noIndex":6,"ogImage":3461,"ogUrl":3462,"ogSiteName":670,"ogType":671,"canonicalUrls":3462,"schema":3463},"Using Gitlab APIs: Real Use Case Scenario","Learn about how GitLab CI and APIs can help you automate bulk tasks","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681037/Blog/Hero%20Images/gitlabapi-cover.jpg","https://about.gitlab.com/blog/gitlab-apis-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using Gitlab APIs: Real Use Case Scenario\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2020-01-22\",\n      }",{"title":3459,"description":3460,"authors":3465,"heroImage":3461,"date":3466,"body":3467,"category":1359,"tags":3468},[2141],"2020-01-22","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nGitlab APIs along with  Continuous Integration can be very helpful when executing certain bulk tasks.\n\nConsider 
this requirement derived from a real-world scenario\n\n* Company XYZ possess several repositories that have been organized under a Gitlab group\n\n![group](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/gitlab-group.png){: .shadow.medium.center.wrap-text}\n\n* The company needs to test the building of projects in bulk using new  hardware (Runner with different CPU Architecture) that will bring down  execution costs, whenever the build in each of the projects fails an issue must be  automatically created.\n\n![runner](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/runner.png){: .shadow.medium.center.wrap-text}\n\n* Lastly, all the issues that were automatically created whenever a project built failed,  should be collected in bulk and reported back to a Wiki\n\n![pipelineview](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/3-pipelineview-collect-issues.png){: .shadow.medium.center.wrap-text}\n\nHow do we test the building of those several projects and create issues and reports about its execution automatically? Let's use Gitlab CI and  APIs.\n\n\n## 1. Company groups and projects Structure\n\nIn this case, the set of projects were grouped under a single group, following this structure:\n\n![groupview](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/4-group-view-api-blog.png){: .shadow.medium.center.wrap-text}\n\n## 2. 
Automatically creating Issues leveraging Gitlab CI and API\n\nIn order to create issues using Gitlab API we will use the Issues API an example of that  can use the following cURL command:\n\n![curl](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/5-create-issue-api-gitlabapi.png){: .shadow.medium.center.wrap-text}\n\nThe API Call: \n\n `curl --request POST --header \"PRIVATE-TOKEN:$ISSUE_API_KEY\" \"https://gitlab.com/api/v4/projects/$CI_PROJECT_ID/issues?title=Build%20Failed&labels=ARMbuild&description=Project%20Tests%20Failed%20on%20ARM\"`\n\n The previous Gitlab API call can be configured to be executed whenever a job fails. Let's dissect this API Call to understand its parameters so you can potentially customize it  for your project environment\n\n* Base URL:  https://gitlab.com/api/v4/projects\n* Project where we want to add the issue:  $CI_PROJECT_ID Notice this ID is unique and corresponds to the project where the CI/CD pipeline runs \n* Issues: Endpoint we use to tell Gitlab we want to add an issue to the project\n* Parameters:\n  * Title: How we want the issue to be titled\n  * Labels: Helpful to group issues by label or type, They help you organize and tag your work so you can track and find the work items you’re interested in.\n  * Description: Field to explain the nature of the issue if needed\n\n The request is of type POST, because we are sending data to our receiver service.  
For this call to be successful it requires  authentication for which we will use *PRIVATE-TOKEN* header\n\n The private token can be generated by following these steps [How-to-generate-token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)\n\nWhen we execute the above API call, we create an issue in the corresponding Gitlab project\n![issueproject](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/5-issues-created.png){: .shadow.medium.center.wrap-text}\n\nGreat, so once the multi-project pipeline has run,  each of the projects that failed in its building stage will create an issue warning us to double check why it failed while documenting the failure and labeling it for future follow-up.\n![multiproject](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/7.1-multiproject-pipeline-gitlabapi.png){: .shadow.medium.center.wrap-text}\n\n## 3. Automatically collecting all the issues from Gitlab Group\n\nThanks to Gitlab CI and APIs we can collect all the issues created and report them back, by adding this script  in  your pipeline stage\n\n![collectissues](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/7-collecting-issues-apiblog.png){: .shadow.medium.center.wrap-text}\n\nLet's dissect again the main API call:\n\n`curl --header \"PRIVATE-TOKEN:$GROUP_ISSUE_LIST\" \"https://gitlab.com/api/v4/groups/9123625/issues`\n\n* Base url: https://gitlab.com/api/v4/\n* Group resource: /groups/9123625\n* Issues resources: /issues \n\nThe previous API call will return a json object, the one we will save as an artifact when executing our pipeline job. Notice this artifact is created and saved automatically by Gitlab CI\nGreat! So far we created issues per failed project, and collected them all in one single step\n\n\n## 4. 
Reporting back to Wiki Project \n\n![wikijob](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/8-reportwiki-gitlab-api.png){: .shadow.medium.center.wrap-text}\n\nFor convenience, the JSON report was transformed to markdown, then using the following script we publish the markdown report to the Wiki of a specific project\n\n`curl --data \"format=markdown&title=$CI_JOB_ID&content=$results\" --header \"PRIVATE-TOKEN:$API_WIKI\" \"https://gitlab.com/api/v4/projects/20852684/wikis\"`\n\nLet's break down again the API call:\n\n* Base url: https://gitlab.com/api/v4/\n* Project resource ID : /projects/20852684\n* Wiki resource: /wikis\n* Parameters: \n  * Data format: markdown. We want to publish a markdown table\n  * Title: Title of the Wiki entry, we use the environment variable corresponding to the CI_JOB that was executed\n  * Content: The markdown table generated with the issues collection\n\n Finally, when the last API call has been executed, this is an example of the output we can get: \n\n ![report](https://about.gitlab.com/images/blogimages/gitlab-apis-ci/10-test-report-gitlabapi.png){: .shadow.medium.center.wrap-text}\n\nLet's recapitulate, by using Gitlab CI in a multi project pipeline along with APIs we were able to test and report automatically x-number of projects and their compatibility with a new hardware CPU architecture. 
More information about the APIs utilized for this project here:\n\n[Issues-api](https://docs.gitlab.com/ee/api/issues.html#new-issue)\n[Collect-group-issues](https://docs.gitlab.com/ee/api/issues.html#list-group-issues)\n[WikisAPI](https://docs.gitlab.com/ee/api/wikis.html)\n\n[Multi-project-pipeline](https://about.gitlab.com/blog/cross-project-pipeline/)\n\n\nIf you’d like to see GitLab’s API in action, watch this [video](https://youtu.be/zdBwMHARkU0?t=469).\n\nFor more information, visit [LEARN@GITLAB](https://about.gitlab.com/learn/).\n\nCover image credit:\n\nCover image by [Mohanan](https://unsplash.com/photos/yQpAaMsQzYE) on [Unsplash](https://unsplash.com)\n{: .note}\n\n",[976,1243,9,1731],{"slug":3470,"featured":6,"template":686},"gitlab-apis-ci","content:en-us:blog:gitlab-apis-ci.yml","Gitlab Apis Ci","en-us/blog/gitlab-apis-ci.yml","en-us/blog/gitlab-apis-ci",{"_path":3476,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3477,"content":3483,"config":3489,"_id":3491,"_type":14,"title":3492,"_source":16,"_file":3493,"_stem":3494,"_extension":19},"/en-us/blog/gitlab-auto-devops-in-action",{"title":3478,"description":3479,"ogTitle":3478,"ogDescription":3479,"noIndex":6,"ogImage":3480,"ogUrl":3481,"ogSiteName":670,"ogType":671,"canonicalUrls":3481,"schema":3482},"GitLab Auto DevOps in action","See how the only single application for the entire DevOps lifecycle helps you deliver better software, faster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664015/Blog/Hero%20Images/laptop.jpg","https://about.gitlab.com/blog/gitlab-auto-devops-in-action","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Auto DevOps in action\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-08-10\",\n      
}",{"title":3478,"description":3479,"authors":3484,"heroImage":3480,"date":3486,"body":3487,"category":791,"tags":3488},[3485],"Aricka Flowers","2018-08-10","\n\nBetter and faster. These two words best describe the production goals of the IT leaders and engineers building today’s cutting-edge software. And GitLab [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) can help them hit those goals while improving their overall business outcomes.\n\nAs the only single application for the complete [DevOps](/topics/devops/) lifecycle, GitLab Auto DevOps gives development teams all the tools they need to deliver secure, high-quality software at previously unattainable speeds. The secret sauce that makes Auto DevOps so effective is the way it automatically sets up the required integrations and pipeline needed to get your software out of the door faster. With Auto DevOps, your code is automatically tested for quality, scanned for security vulnerabilities and licensing issues, packaged and then set up for monitoring and deployment, leaving engineers with time to place more attention on creating a better product.\n\nThis may all make sense in theory, but as they say, a picture is worth 1,000 words. And it is [rumored](https://idearocketanimation.com/4293-video-worth-1-million-words/?) that video is worth 1.8 million words. With that being said, why not take a look at GitLab Auto DevOps in action? \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4Uo_QP9rSGM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWant to learn more about GitLab Auto DevOps? 
Check out our [documentation](https://docs.gitlab.com/ee/topics/autodevops/), [feature](https://docs.gitlab.com/ee/topics/autodevops/) and [product vision](/direction/) pages.\n\n\nCover photo by [Ash Edmonds](https://unsplash.com/photos/Koxa-GX_5zs) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n",[9,728,1158,683,875,1339,916],{"slug":3490,"featured":6,"template":686},"gitlab-auto-devops-in-action","content:en-us:blog:gitlab-auto-devops-in-action.yml","Gitlab Auto Devops In Action","en-us/blog/gitlab-auto-devops-in-action.yml","en-us/blog/gitlab-auto-devops-in-action",{"_path":3496,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3497,"content":3503,"config":3508,"_id":3510,"_type":14,"title":3511,"_source":16,"_file":3512,"_stem":3513,"_extension":19},"/en-us/blog/gitlab-chart-works-towards-kubernetes-1-22",{"title":3498,"description":3499,"ogTitle":3498,"ogDescription":3499,"noIndex":6,"ogImage":3500,"ogUrl":3501,"ogSiteName":670,"ogType":671,"canonicalUrls":3501,"schema":3502},"GitLab Chart works towards Kubernetes 1.22","New minimum version is 1.19 for in-chart NGINX Ingress Controller.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670178/Blog/Hero%20Images/GitLab-Ops.png","https://about.gitlab.com/blog/gitlab-chart-works-towards-kubernetes-1-22","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Chart works towards Kubernetes 1.22\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-12-17\",\n      }",{"title":3498,"description":3499,"authors":3504,"heroImage":3500,"date":3505,"body":3506,"category":791,"tags":3507},[745],"2021-12-17","\n\nWe are working to make the GitLab Chart and the GitLab Operator support Kubernetes 1.22, which requires updating the NGINX Ingress Controller used within the Chart and Operator.\n\nThis update requires that we drop support for versions of Kubernetes prior to 
1.19 if using the in-chart NGINX Ingress Controller. Users that still require support for Kubernetes 1.18 and prior releases will only be able to deploy up to Chart version 5.5.x.\n\n## More details on the changes\n\nGitLab uses a [forked version](https://docs.gitlab.com/charts/charts/nginx/fork.html) of the community-supported ingress-nginx Chart to expose the GitLab components via Ingresses. \n\nSupporting Kubernetes 1.22 requires updating the included NGINX Ingress Controller to [version 1.0.4](https://github.com/kubernetes/ingress-nginx/releases/tag/controller-v1.0.4) in order to support the networking.k8s.io/v1 API in Kubernetes 1.22. The previous networking API (networking.k8s.io/v1beta1) has been deprecated since Kubernetes 1.19 and removed in Kubernetes 1.22.\n\nAs a result of the upgrade, we are bound to the breaking change of NGINX Ingress Controller, removing support before Kubernetes 1.19. They provide more clarification in [their FAQ](https://kubernetes.github.io/ingress-nginx/#faq-migration-to-apiversion-networkingk8siov1).\n\nThe forked ingress-nginx Chart is based on [version 4.0.6](https://artifacthub.io/packages/helm/ingress-nginx/ingress-nginx/4.0.6) of ingress-nginx/ingress-nginx, which uses [version 1.0.4](https://github.com/kubernetes/ingress-nginx/releases/tag/controller-v1.0.4) of the NGINX Ingress Controller.\n\n## Who is impacted\n\nAny deployment which is making use of the NGINX Ingress Controller provided by the GitLab Chart. This covers most, but far from all, users of our Helm Chart and Operator. If you are using an alternate Ingress provider (such as AWS ALB, Azure Application Gateway, or Google GCE Ingress), you will not be affected.\n\n## What to expect\n\nWe recognize that this change may have unintended effects, but most GitLab instances will seamlessly transition to the new NGINX Ingress Controller without incident. 
As always, we recommend a backup be created prior to upgrading the GitLab Chart or GitLab Operator, which will allow your data to be safeguarded should a recovery be necessary, caused by complications in the upgrade.\n\nDepending upon the environment and/or cloud provider, it is possible that when NGINX Ingress Controller is replaced during the upgrade process that the IP addresses associated with the Ingresses may change. This may require that the DNS records for the GitLab instance be updated if a controller such as external-dns is not managing the DNS records. The DNS records related to the following Ingress objects may be affected:\n\n* gitlab.\n* registry.\n* minio. (if used)\n* kas. (if used)\n\nIf the GitLab Pages component is enabled, there may be other DNS records that will need to be updated to connect to the proper Ingress.\n\n## What if there is a problem with the upgrade?\n\nWhile it is not expected that an upgrade will cause a problem, not all environments or configurations can be anticipated. In the event that there is an upgrade problem, please contact GitLab Support if you are a licensed customer. 
If you are running the Community Edition of GitLab, please open an issue in the [GitLab Chart](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/new?issue%5Bmilestone_id%5D=) or [GitLab Operator](https://gitlab.com/gitlab-org/cloud-native/gitlab-operator/-/issues/new?issue%5Bmilestone_id%5D=) projects.\n",[9,683,1477],{"slug":3509,"featured":6,"template":686},"gitlab-chart-works-towards-kubernetes-1-22","content:en-us:blog:gitlab-chart-works-towards-kubernetes-1-22.yml","Gitlab Chart Works Towards Kubernetes 1 22","en-us/blog/gitlab-chart-works-towards-kubernetes-1-22.yml","en-us/blog/gitlab-chart-works-towards-kubernetes-1-22",{"_path":3515,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3516,"content":3522,"config":3527,"_id":3529,"_type":14,"title":3530,"_source":16,"_file":3531,"_stem":3532,"_extension":19},"/en-us/blog/gitlab-ci-cd-is-for-multi-cloud",{"title":3517,"description":3518,"ogTitle":3517,"ogDescription":3518,"noIndex":6,"ogImage":3519,"ogUrl":3520,"ogSiteName":670,"ogType":671,"canonicalUrls":3520,"schema":3521},"GitLab CI/CD is for multi-cloud","Can cloud providers (and their tools) ever be cloud agnostic? We discuss GitHub Actions and GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678401/Blog/Hero%20Images/gitlab-for-multicloud.jpg","https://about.gitlab.com/blog/gitlab-ci-cd-is-for-multi-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab CI/CD is for multi-cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-11-06\",\n      }",{"title":3517,"description":3518,"authors":3523,"heroImage":3519,"date":3524,"body":3525,"category":679,"tags":3526},[788],"2019-11-06","\nAs organizations continue to go all-in on cloud-first strategies, optimizing their cloud architectures is becoming a top priority. 
It’s estimated that investments in infrastructure to support cloud computing account for [more than a third of all IT spending](https://www.zdnet.com/article/top-cloud-providers-2019-aws-microsoft-azure-google-cloud-ibm-makes-hybrid-move-salesforce-dominates-saas/). Using multiple cloud providers with multiple cloud services requires an architecture that enables workflow portability, and organizations will need an unbiased, multi-cloud strategy to make that a reality.\n\n## What is multi-cloud?\n\nMulti-cloud describes [how enterprises use multiple cloud providers to meet different technical or business requirements](https://www.zdnet.com/article/multicloud-everything-you-need-to-know-about-the-biggest-trend-in-cloud-computing/). At its core, multi-cloud is made possible through cloud-native applications built from containers using services from different cloud providers. It allows for multiple services to be managed in one architecture. [85% of enterprises currently operate in multiple clouds](https://www.ibm.com/blogs/cloud-computing/2018/10/19/survey-multicloud-management-tools/), but just because an organization uses multiple cloud providers doesn’t necessarily mean they are multi-cloud.\n\nBeing dependent on one cloud provider can limit the flexibility of an organization and leave it susceptible to vendor lock-in. Workflow portability is one of the benefits of multi-cloud and it enables a seamless workflow, regardless of _where_ you deploy.\n\nIn addition to workflow portability, there are several reasons why most businesses have adopted multi-cloud, and why more will continue to use this approach:\n\n*   **Greater flexibility**: Each cloud vendor shines in some areas and is weak in others. 
Using multiple vendors lets you use the right tool for the job.\n*   **Better acquisitions**: Whether an organization wants to grow through acquisitions (or be acquired itself), existing systems can work within another company’s infrastructure, even if both are using separate cloud providers.\n*   **Increased resilience**: Architecting failover between multiple cloud providers lets you stay up even if one of your vendors is down.\n*   **Improved cloud negotiations**: If another cloud vendor offers better terms or significant credits, businesses can have better leverage because their [DevOps processes](/topics/devops/) are not tied to vendor-specific services.\n*   **Fewer conflicts of interest**: With cloud service providers offering so many different services, you’re less likely to find yourself [in conflict with customers competing in those same spaces](https://www.cnbc.com/2017/06/21/wal-mart-is-reportedly-telling-its-tech-vendors-to-leave-amazons-cloud.html).\n\nA multi-cloud strategy allows organizations to use the tools and services that work best for the job, not just tools that work within their cloud environment.\n\n## Can cloud providers really support multi-cloud?\n\nCloud service providers continually compete with each other to provide more services to keep customers in their cloud. The more services you have with one CSP, the less likely you are to migrate those workloads. AWS offers 90 different services, as does GCP. In comparison, [Microsoft lists over 160 services on its Azure product page](https://www.parkmycloud.com/cloud-services-comparison/) and many of them are integrations with other Microsoft products. Cloud service providers want to have more of your business by making you more dependent on their specific services.\n\nEven though most cloud providers claim to support multi-cloud, migrating workloads out of their cloud isn’t in their best interest. 
As cloud computing is a pay-per-use model, it seems unlikely that multi-cloud would be a goal for the large cloud providers.\n\n## Implementing CI/CD in the cloud\n\nIn the [RightScale 2019 State of the Cloud Report](https://info.flexera.com/CM-REPORT-State-of-the-Cloud), 33% of respondents mentioned [implementing CI/CD](/topics/ci-cd/) in the cloud as a top cloud initiative. DevOps processes play a big role in multi-cloud deployments, so if organizations are wanting to build faster and deploy anywhere, CI/CD will be a key factor in that success. Multi-cloud is all about being cloud-agnostic, and your tools should also support that goal.\n\nBut what if your CI/CD comes from a cloud provider?\n\n### GitHub Actions and GitLab CI/CD\n\nIn 2018, [GitHub announced Actions](/blog/github-launch-continuous-integration/) with CI-like functionality built into a single application offering. The industry has shown us in the past year that single application functionality [is becoming a trend](/blog/built-in-ci-cd-version-control-secret/), and GitLab has been a part of that single application message since the beginning. Now that continuous integration has caught up with the importance of single application, we have to examine how both GitHub and GitLab fit into multi-cloud deployments.\n\nIn June 2018 [Microsoft acquired GitHub](/blog/microsoft-acquires-github/), which really affirmed the importance of software developers and modern DevOps. Developer tools have a high capacity for driving cloud usage because once you have your application code hosted, the natural next step is finding a place to deploy it. 
From a strategic standpoint, this acquisition made a lot of sense for Microsoft because they could use [GitHub’s popularity as a source code management tool as a springboard for greater Azure adoption](https://www.techrepublic.com/article/with-github-acquisition-microsoft-wants-to-make-azure-the-default-cloud-for-developers/).\n\nWhen we talk about multi-cloud in the CI/CD conversation, cloud-agnosticism kind of goes out the window when it comes to GitHub Actions. GitHub’s ubiquity in the SCM market means that millions of developers are using that platform, and it’s those users that [made GitHub such an appealing asset for Microsoft](/blog/microsoft-acquires-github/).\n\nGitLab, in comparison, is cloud-independent. When organizations use GitLab CI/CD, there is no conflict of interest in using one cloud provider over another. Being truly cloud-agnostic means that GitLab provides a complete [DevOps platform](/solutions/devops-platform/) that allows teams to have the same productivity metrics, the same governance, regardless of what cloud you use.\n\n“Choosing a cloud provider should depend on the company’s business objectives, it should not be constrained by technology, and GitLab wants to enable every one of our customers to have this freedom,” says [Sid Sijbrandij](/company/team/#sytses), co-founder and CEO at GitLab.\n\n## Multi-cloud should mean any cloud\n\nBusinesses want to choose cloud providers for their inherent value and use the services that best meet their needs. In turn, we should expect our DevOps processes to support multi-cloud objectives. 
Partnering with cloud-agnostic vendors provides a consistent workflow across all clouds, and CI/CD will play a big role in the multi-cloud future.\n\nWe’d love for you to watch our webcast _Mastering your CI/CD_ so you can see for yourself how GitLab’s industry-leading CI/CD helps teams build, test, deploy, and monitor code on any cloud.\n\n[Watch the webcast](/competition/github/)\n{: .alert .alert-gitlab-purple .text-center}\n\nCover image by [Alexandre Chambon](https://unsplash.com/@goodspleen?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[109,683,9],{"slug":3528,"featured":6,"template":686},"gitlab-ci-cd-is-for-multi-cloud","content:en-us:blog:gitlab-ci-cd-is-for-multi-cloud.yml","Gitlab Ci Cd Is For Multi Cloud","en-us/blog/gitlab-ci-cd-is-for-multi-cloud.yml","en-us/blog/gitlab-ci-cd-is-for-multi-cloud",{"_path":3534,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3535,"content":3541,"config":3547,"_id":3549,"_type":14,"title":3550,"_source":16,"_file":3551,"_stem":3552,"_extension":19},"/en-us/blog/gitlab-com-artifacts-cdn-change",{"title":3536,"description":3537,"ogTitle":3536,"ogDescription":3537,"noIndex":6,"ogImage":3538,"ogUrl":3539,"ogSiteName":670,"ogType":671,"canonicalUrls":3539,"schema":3540},"GitLab.com CI artifacts to use Google Cloud CDN","GitLab CI users might benefit from faster downloads from edge caches closest to the user's location.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663009/Blog/Hero%20Images/ESA_case_study_image.jpg","https://about.gitlab.com/blog/gitlab-com-artifacts-cdn-change","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab.com CI artifacts to use Google Cloud CDN\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": 
\"2022-10-25\",\n      }",{"title":3536,"description":3537,"authors":3542,"heroImage":3538,"date":3544,"body":3545,"category":726,"tags":3546},[3543],"Stan Hu","2022-10-25","\n\nOver the next month and going forward, requests for GitLab CI artifacts downloads may be redirected\nto [Google Cloud CDN](https://cloud.google.com/cdn) instead of\n[Google Cloud Storage](https://cloud.google.com/storage). We anticipate that GitLab CI users may benefit from faster\ndownloads from edge caches closest to your location.\n\n**Disclaimer:** This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\n\n## How will this work?\n\nCurrently when a CI runner or other client [downloads a CI artifact](https://docs.gitlab.com/ee/api/job_artifacts.html),\nGitLab.com responds with a 302 redirect to a time-limited, pre-signed URL with a domain of `storage.googleapis.com`.\n\nAfter this change, the domain will change to `cdn.artifacts.gitlab-static.net`.\n\nThe exception is for requests originating from within the Google Cloud\nPlatform. These will continue to be redirected to Cloud Storage.\n\n## When will this change occur?\n\nWe expect to start the transition around the end of October 2022. 
This will be a\ngradual transition using a percentage-based rollout, so we anticipate that you will see\nan increasing number of your requests redirected to Google Cloud\nCDN instead of Google Cloud Storage until all of the requests are served by the\nformer.\n\nYou can follow along with the progress of this initiative and raise any\nquestions in [this issue](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/7894). We\nwill post more detailed timelines in that issue as we refine the rollout\nplan.\n\n## How does this change impact you?\n\nSince GitLab CI runners and certain clients automatically handle URL\nredirections already, we expect that downloads for CI artifacts should\ncontinue to work without any action.\n\nWe encourage upgrading to the latest version of the GitLab Runner in\norder to take advantage of the CDN. This feature was [introduced in\nGitLab Runner v13.1.0](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/2115).\nIf a runner cannot download from the CDN host, it will retry without the\nCDN and download the artifact directly through GitLab.com.\n\nHowever, if you have a firewall that only allows\n`storage.googleapis.com`, you will need to add\n`cdn.artifacts.gitlab-static.net` (34.110.204.38) to the allow list.\n\n### What do these warning messages mean?\n\nWith this change, users may see warning messages in the CI job logs:\n\n#### read: connection reset by peer\n\n```plaintext\nERROR: Downloading artifacts from coordinator... error couldn't execute GET against https://gitlab.com/api/v4/jobs/\u003Cjob id>/artifacts?direct_download=true: Get \"https://cdn.artifacts.gitlab-static.net/...\nread tcp 172.17.0.2:59332->34.110.204.38:443: read: connection reset by peer  id=1234 token=\u003Csome token>\nWARNING: Retrying...                                error=invalid argument\nDownloading artifacts from coordinator... 
ok        id=1234 responseStatus=200 OK token=\u003Csome token>\n```\n\nThis error suggests the runner was not able to access the CDN. Check\nyour network firewalls and allow access to the IP 34.110.204.38.\n\nNote that there are two `Downloading artifacts from coordinator`\nmessages. The second attempt succeeded because the runner retried\nwithout the CDN.\n\n#### x509: certificate signed by unknown authority\n\n```plaintext\nERROR: Downloading artifacts from coordinator... error couldn't execute GET against https://gitlab.com/api/v4/jobs/\u003Cjob id>/artifacts?direct_download=true: Get \"https://storage.googleapis.com/gitlab-gprd-artifacts/...: x509: certificate signed by unknown authority  id=1234 token=\u003Csome token>\n```\n\nIf you see this error with a Windows runner, upgrade to v15.5.0 since it\nis compiled with [Go 1.18](https://tip.golang.org/doc/go1.18), which\nsupports [using the system certificate pool](https://github.com/golang/go/issues/16736).\n\nOtherwise, this error suggests the runner is configured with [custom SSL certificates](https://docs.gitlab.com/runner/configuration/tls-self-signed.html).\nYou may need to update your certificates or include the certificates directly in the bundle.\n\n#### Authentication required\n\nSome clients may report a 401 error with `Authentication required` after\nrequesting to download a job artifact:\n\n```xml\n\u003C?xml version='1.0' encoding='UTF-8'?>\u003CError>\u003CCode>AuthenticationRequired\u003C/Code>\u003CMessage>Authentication required.\u003C/Message>\u003C/Error>\n```\n\nThis error message suggests the HTTP client is following the 302\nredirect and sending the `Authorization` header with the redirected\nURL. This is a known issue with Java HTTP clients.\n\nUpdate your client to drop the `Authorization` header when following the\nredirect. 
Google Cloud Storage ignores this header if it were set, but\nCloud CDN rejects requests that have the `Authorization` header set.\n",[9,728,793,231,976],{"slug":3548,"featured":6,"template":686},"gitlab-com-artifacts-cdn-change","content:en-us:blog:gitlab-com-artifacts-cdn-change.yml","Gitlab Com Artifacts Cdn Change","en-us/blog/gitlab-com-artifacts-cdn-change.yml","en-us/blog/gitlab-com-artifacts-cdn-change",{"_path":3554,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3555,"content":3561,"config":3567,"_id":3569,"_type":14,"title":3570,"_source":16,"_file":3571,"_stem":3572,"_extension":19},"/en-us/blog/gitlab-com-container-registry-cdn-change",{"title":3556,"description":3557,"ogTitle":3556,"ogDescription":3557,"noIndex":6,"ogImage":3558,"ogUrl":3559,"ogSiteName":670,"ogType":671,"canonicalUrls":3559,"schema":3560},"GitLab.com Container Registry to use Google Cloud CDN","The GitLab.com Container Registry will now interface with the Google Cloud Content Delivery Network","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670766/Blog/Hero%20Images/container-reg-cdn-blog.jpg","https://about.gitlab.com/blog/gitlab-com-container-registry-cdn-change","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab.com Container Registry to use Google Cloud CDN\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2022-01-13\",\n      }",{"title":3556,"description":3557,"authors":3562,"heroImage":3558,"date":3564,"body":3565,"category":299,"tags":3566},[3563],"Darren Eastman","2022-01-13","\n\nIn January 2022, we are working on implementing a change to the Container Registry on GitLab.com. The GitLab Container Registry will now interface with the Google Cloud Content Delivery Network [CDN](https://cloud.google.com/cdn) to optimize costs and improve performance. 
When implemented, the system will redirect download requests for blobs stored in the GitLab Container Registry to Google Cloud CDN instead of Google Cloud Storage, as is the case today. We expect GitLab CI users to benefit from faster image downloads for those image layers retrieved from edge caches closest to your location.\n\n**Disclaimer** This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\n\n## How will this work?\n\nAuthorized requests for [downloading a blob](https://docs.docker.com/registry/spec/api/#pulling-a-layer) and [checking if a blob exists](https://docs.docker.com/registry/spec/api/#existing-layers) in the [GitLab.com Container Registry](https://docs.gitlab.com/ee/user/packages/container_registry) will be redirected to the Google Cloud CDN at `cdn.registry.gitlab-static.net`. So far, these requests were redirected to Google Cloud Storage at `storage.googleapis.com`.\n\nThe exception is for requests originating from within the Google Cloud Platform. These will continue to be redirected to Cloud Storage.\n\n## When will this change occur?\n\nWe expect to start the transition in late January 2022. This will be a gradual transition using a percentage-based rollout, so you can expect an increasing number of your requests to be redirected to Google Cloud CDN instead of Google Cloud Storage until all of them are served by the former.\n\nYou can follow along with the progress of this initiative and raise any questions in this [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/350048). 
We will post more detailed timelines in that issue as we refine the rollout plan.\n\n## How does this change impact you?\n\nSince most client tools, such as the Docker CLI, handle redirections automatically, this change will be imperceptible for most users on GitLab.com.\n\nHowever, if you are allow listing `storage.googleapis.com`, you will need to add `cdn.registry.gitlab-static.net` to the allow list as well. Please keep both endpoints on your allow list for the time being, as the transition will be gradual. There will be another blog post once the transition is complete.\n\n\nCover image by [Pat Kay](https://unsplash.com/photos/3d7DTnuNj6E) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,728,793,231],{"slug":3568,"featured":6,"template":686},"gitlab-com-container-registry-cdn-change","content:en-us:blog:gitlab-com-container-registry-cdn-change.yml","Gitlab Com Container Registry Cdn Change","en-us/blog/gitlab-com-container-registry-cdn-change.yml","en-us/blog/gitlab-com-container-registry-cdn-change",{"_path":3574,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3575,"content":3580,"config":3586,"_id":3588,"_type":14,"title":3589,"_source":16,"_file":3590,"_stem":3591,"_extension":19},"/en-us/blog/gitlab-com-container-registry-update",{"title":3576,"description":3577,"ogTitle":3576,"ogDescription":3577,"noIndex":6,"ogImage":928,"ogUrl":3578,"ogSiteName":670,"ogType":671,"canonicalUrls":3578,"schema":3579},"Announcing an exciting update to the GitLab.com Container Registry","A new version of our Container Registry is coming with improvements we're excited about. 
Here's what you need to know.","https://about.gitlab.com/blog/gitlab-com-container-registry-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing an exciting update to the GitLab.com Container Registry\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2021-10-25\",\n      }",{"title":3576,"description":3577,"authors":3581,"heroImage":928,"date":3583,"body":3584,"category":726,"tags":3585},[3582],"Tim Rizzi","2021-10-25","\n\nIn the coming weeks, we are planning to roll out a new version of the Container Registry on GitLab.com. Prior to deploying this major update, we wanted to clearly communicate the planned changes, what to expect, and why we are excited about this update. \n\nIf you have any questions or concerns, please don’t hesitate to comment in the [epic](https://gitlab.com/groups/gitlab-org/-/epics/5523). \n\n## Context \n\nIn [milestone 8.8](/releases/2016/05/22/gitlab-8-8-released/), GitLab launched the MVC of the Container Registry. This feature integrated the Docker Distribution registry into GitLab so that any GitLab user could have a space to publish and share container images. \n\nBut there was an inherent limitation with Docker Distribution as all metadata associated with a given image/tag was stored in the storage backend. This made using that metadata to build API features like storage usage visibility and sorting and filtering unfeasible. With the most recent update to the Container Registry, we’ve added a new metadata database that will store all of the metadata in Postgres instead of the storage backend. 
This will allow us to unblock many of the features that you’ve been asking for.\n\n## Why we are excited \n\n- [Storage visibility for the container registry](https://gitlab.com/groups/gitlab-org/-/epics/7225)\n- Performance improvements for list operations when using the GitLab API and UI\n- [Redesign of the UI](https://gitlab.com/groups/gitlab-org/-/epics/3211), including\n  - [Build and commit metadata for tags built via CI](https://gitlab.com/gitlab-org/gitlab/-/issues/197996)\n  - [Search by tag name](https://gitlab.com/gitlab-org/gitlab/-/issues/255614)\n  \n## The plan \n\nWe're planning a phased migration, starting with newly-created repositories. We'll roll this out incrementally to maintain safety for those customers and provide our team with an opportunity to identify and address any concerns. \n\n## Timing \n\nWe're starting the percentage-based rollout on October 26th, 2021, with GitLab internal projects' customers with less usage, which we expect to take 4 to 6 weeks. For more information about the planned, percentage-based rollout, please refer to this [epic](https://gitlab.com/groups/gitlab-org/-/epics/6426). \n\nOnce we complete that work, we’ll switch to customers who heavily use the Container Registry for new repositories. \n\n## FAQ \n\n- You mentioned new image repositories, but what about existing image repositories? \n  - The migration of newly-created repositories is phase 1 of this project. Once complete, we have some planned development work and then will begin to schedule the migration of existing repositories. Please stay tuned or follow along in this [epic](https://gitlab.com/groups/gitlab-org/-/epics/5523) for more information. \n- Do I need to do anything?\n  - No, the process is fully automated. \n- Is there anything I can do to help? \n  - Yes! 
Although no action is necessary, we recommend activating the Container Registry [cleanup policies](https://docs.gitlab.com/ee/user/packages/container_registry/#cleanup-policy) for any relevant projects. This will make [phase 2](https://gitlab.com/groups/gitlab-org/-/epics/6427) of the migration much faster. \n- Is the update required? \n  - Yes. This change will allow us to deliver a more modern and scalable product and you don’t want to miss out on those features.\n- Will there be any downtime?\n  - For phase 1 of the migration, which will focus on new image repositories, there is no expected downtime. \n- How can we learn more about phase 2? \n  - Right now we are focused on phase 1, but please feel free to ask any questions you may have in this [epic](https://gitlab.com/groups/gitlab-org/-/epics/6427).\n",[9,728,231],{"slug":3587,"featured":6,"template":686},"gitlab-com-container-registry-update","content:en-us:blog:gitlab-com-container-registry-update.yml","Gitlab Com Container Registry Update","en-us/blog/gitlab-com-container-registry-update.yml","en-us/blog/gitlab-com-container-registry-update",{"_path":3593,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3594,"content":3600,"config":3606,"_id":3608,"_type":14,"title":3609,"_source":16,"_file":3610,"_stem":3611,"_extension":19},"/en-us/blog/gitlab-dedicated-available",{"title":3595,"description":3596,"ogTitle":3595,"ogDescription":3596,"noIndex":6,"ogImage":3597,"ogUrl":3598,"ogSiteName":670,"ogType":671,"canonicalUrls":3598,"schema":3599},"GitLab Dedicated single-tenant SaaS now generally available","Achieve control and convenience with a fully managed DevSecOps platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663948/Blog/Hero%20Images/dedicatedcoverimage.png","https://about.gitlab.com/blog/gitlab-dedicated-available","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Dedicated 
single-tenant SaaS now generally available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Thomas\"}],\n        \"datePublished\": \"2023-06-15\",\n      }",{"title":3595,"description":3596,"authors":3601,"heroImage":3597,"date":3603,"body":3604,"category":726,"tags":3605},[3602],"Andrew Thomas","2023-06-15","\nLast year, we launched the [Limited Availability release of GitLab Dedicated](https://about.gitlab.com/blog/introducing-gitlab-dedicated/), a fully managed, single-tenant SaaS deployment of our comprehensive DevSecOps platform designed to address the needs of customers with stringent compliance requirements. Since then, we’ve worked closely with our Limited Availability customers, incorporating their feedback into targeted improvements and essential new features. \n\nWe are excited to share that [GitLab Dedicated is now generally available](https://about.gitlab.com/dedicated/), complete with compliance features such as the ability for customers to encrypt the data stored in their instance with their own encryption key.\n\nWith GitLab Dedicated, organizations can access all of the benefits of the DevSecOps platform – including faster releases, better security, and more productive developers – while satisfying compliance requirements such as data residency, isolation, and private networking.\n\nAccording to [GitLab’s 2023 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2023/), 44% of operations professionals said that their current role involves managing hardware and/or infrastructure “all of the time” or “most of the time.” GitLab Dedicated alleviates that burden, enabling organizations to focus on their core business model and meet their compliance needs without the overhead of managing a complex DevSecOps environment.\n\n## Flexibility and convenience\nOrganizations can achieve a lower total cost of ownership and quicker time to value with GitLab Dedicated, compared to hosting the platform themselves, while 
maintaining high operational standards.\n\n**A fully managed solution:** When software is not upgraded to the latest versions, organizations use obsolete and inefficient software that can be exposed to security threats. Because GitLab Dedicated is fully managed by GitLab, customers get access to the latest software features and security updates. \n\n**Data residency in the region of your choice:** Customers frequently ask us about data residency to meet stringent compliance requirements, which vary across different regions around the world. GitLab Dedicated can be deployed in [30+ regions](https://docs.gitlab.com/ee/subscriptions/gitlab_dedicated/) to meet these requirements.\n\n**High availability and scalability:** To meet the needs of large or rapidly scaling organizations, GitLab Dedicated uses a cloud native architecture that can support up to 50,000 users, with a disaster recovery plan and [availability targets](https://about.gitlab.com/handbook/engineering/infrastructure/team/gitlab-dedicated/slas/) to satisfy reliability needs. \n\n## Control and compliance\nThe need to have control over data and achieve compliance has never been greater. GitLab Dedicated offers data residency, tenant isolation, and private networking to help customers meet stringent compliance requirements. \n\n**Enterprise-grade security:** Customers require assurance that their data and access to their data is secure. GitLab Dedicated allows customers to implement necessary controls to protect their software delivery platform and meet compliance requirements. This includes access control using SAML-based authentication and authorization, secure communications with IP allow lists, private connectivity, and data encryption both at rest and in transit. \n\n**Full data and source code IP isolation:** As a single-tenant deployment, GitLab Dedicated helps to isolate data and source code from other tenants. 
Customers can also choose to encrypt the data stored in their instance with their own encryption key. \n\n**Full control over your data:** While GitLab fully manages the DevSecOps platform, customers have full control over the data it hosts, the region the data resides in, and securing the data themselves. Customers also retain full administrative access to the DevSecOps platform itself.\n\n## Looking ahead: AI and GitLab Dedicated\nGitLab Dedicated is a single-tenant deployment preferred by organizations with complex compliance requirements, so we plan to integrate AI into GitLab Dedicated without compromising on compliance requirements like data residency, isolation, and predictability. \n- In the near term, we will introduce AI features like Code Suggestions and Suggested Reviewers into GitLab Dedicated once they are made generally available.\n- In the long term, we will explore incorporating native AI capabilities such as training models to generate tailored insights and suggestions while keeping data private.\n\nTo learn more about what’s coming, follow the [GitLab Dedicated roadmap](https://about.gitlab.com/direction/saas-platforms/dedicated/#roadmap).\n\n## Learn more about GitLab Dedicated\nGitLab Dedicated includes all of the capabilities of GitLab Ultimate, with the added benefits of single-tenant architecture, regional data residency, and platform management by GitLab. With GitLab Dedicated, customers can realize operational efficiencies and deliver secure software faster. 
\n\nLearn more about [GitLab Dedicated](https://about.gitlab.com/dedicated/) today.\n",[9,728,1180],{"slug":3607,"featured":6,"template":686},"gitlab-dedicated-available","content:en-us:blog:gitlab-dedicated-available.yml","Gitlab Dedicated Available","en-us/blog/gitlab-dedicated-available.yml","en-us/blog/gitlab-dedicated-available",{"_path":3613,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3614,"content":3619,"config":3624,"_id":3626,"_type":14,"title":3627,"_source":16,"_file":3628,"_stem":3629,"_extension":19},"/en-us/blog/gitlab-education-solutions",{"title":3615,"description":3616,"ogTitle":3615,"ogDescription":3616,"noIndex":6,"ogImage":928,"ogUrl":3617,"ogSiteName":670,"ogType":671,"canonicalUrls":3617,"schema":3618},"GitLab solutions for education","You spoke, we listened! Check out our new and improved options for bringing GitLab to your campus.","https://about.gitlab.com/blog/gitlab-education-solutions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab solutions for education\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"}],\n        \"datePublished\": \"2021-03-30\",\n      }",{"title":3615,"description":3616,"authors":3620,"heroImage":928,"date":3621,"body":3622,"category":726,"tags":3623},[2316],"2021-03-30","\n\nIt's been an exciting year for the GitLab for Education Program! 
We hit 2 million all-time seats issued, we connected with many of our Program Members through coffee chats, on issues, in our customer reference program, and through our GitLab for [Education Program 2020 Program Survey](/solutions/education/edu-survey/).\n\nThrough these conversations and the survey, you've shared with us your successes and some of your challenges with bringing the DevOps transformation to your campus.\nOur survey yielded some great insights about your journey, which have inspired the iterations to our solutions for education (which you can read about below). Specifically:\n\n### GitLab is used extensively across the entire educational institution\n\nAdoption extends well beyond typical Computer Science departments into many different academic departments as well as administrative departments such as information technology and services. Departments ranged from natural and social sciences, to medical fields, nearly every time of engineering, to library science. Even Seismology, Planetary Science, and Astronomy – the sky is literally not even the limit for DevOps in Education.\n\nNot only is GitLab used for teaching coding, **adoption across campus is multidisciplinary**. Generally, departments are using GitLab for more than one primary purpose including teaching, research, learning, and student portfolios.\n\nMost campuses want to use **GitLab across the enterprise**. In today's modern campus, collaboration occurs across campus. In the past, license restrictions based on use case have been a barrier to collaboration and adoption.\n\nFor example, one respondent indicated that:\n\n> \"At our institution, it is not binary if you are 'administration' and 'teaching/learning.' 
We work on all sorts of projects for all aspects of the university from teaching/learning, to research and outreach.\"\n\nWe realize that our education community is innovating and pushing traditional boundaries and that we need to do the same in order to make adoption of DevOps across the enterprise easier for everyone.\n\nAt GitLab, we believe everyone can contribute! When you speak, we listen.\n\nOver the last year, we took a hard look at our licensing options for education, we took in your feedback, and we had long and detailed discussion of how we can better meet your needs. We did our best to think outside the box and come up with a new structure to enable the adoption of GitLab across all of campus.\n\nWe are thrilled to announce that we've revamped our existing GitLab for Education license and created a new innovative license structure specifically designed to enable campus-wide adoption, [GitLab for Campuses](/solutions/education/).\n\nBefore we get to the new offering, we'll highlight what's new in the GitLab for Education Program. The program provides **free subscriptions of GitLab Ultimate**, our top tier, to [qualifying institutions](/handbook/marketing/developer-relations/community-programs/education-program/#gitlab-for-education-program-requirements). Program members are able to choose a deployment method, self-managed or hosted (formerly Gold). GitLab for Education licenses can only be used directly for **teaching, learning, or research**. It is not authorized to run, administer, or operate an institution.\n\nThere is no limit on the number of seats a university can request. Additionally, there is now **no limit on the number of subscriptions** any one university can request. We realize that a university can have many different administrative units and that it is often difficult to coordinate across these units. We also realize that different divisions on campus may require different hosting types. 
This is especially true where research laboratories or centers store sensitive data and may need to have their repositories on locally isolated servers.\nThe flexibility of deployment method, either self-managed or SaaS, was noted as a large advantage of the GitLab for Education Program in our survey. Campuses are able to choose the method that works best with their security requirements and authentication systems as well as meeting research funding requirements. We are really excited to offer this flexibility to campus as a whole by allowing multiple deployment methods per campus through the program.\n\n### Requirements\nWe've seen a great increase in the number of applications, types of entities applying, and the use cases. We are surprised and amazed on a daily basis! Given this influx, we have updated and added clarity to our institutional and use case requirements.\n\nIn order to qualify for the GitLab for Education Program the institution must be accredited (by a local, state, provincial, federal, or national authorized agency), have the primary purpose of teaching its enrolled students, and be degree-granting. Institutions can be public or private but must be registered as not-for profit.\n\nWhile we value the kind of work that code camps, code academies, training centers, eLearning platforms etc. are engaged in, these entities do not qualify for the free program ([full list here](/handbook/marketing/developer-relations/community-programs/education-program/#gitlab-for-education-program-requirements)).\n\n**Acceptable use cases** include classroom use, non-commercial academic research, or organization use, where the use is directly related to a club or organization for the benefit of developing students.\nResearch that is conducted at the request of and for the benefit of a third party is **not authorized under the GitLab for Education license**. 
Along those lines, any activities conducted by a consulting center, super computer laboratory, or entity that provides services for the benefit of a third party are not acceptable under the free license. Please see additional [details here](/handbook/marketing/developer-relations/community-programs/education-program/#gitlab-for-education-program-requirements).\n\n## GitLab for Campuses\n\nThe free program comes with many restrictions, as outlined above, and we've heard from you that many of these restrictions are not ideal, especially for those campuses that are early adopters and are already seeing demand across campus.\n\nYou've told us that separating users based on use case or persona is not realistic for many of you. And we understand! That's why we created a unique offering that combines the best of the both our free and paid offerings with the goal of enabling adoption of GitLab across the campus enterprise.\n\nThe GitLab for Campuses offering is designed so that campuses don't have to worry about who is using GitLab or how they are using it. Any activites that occur under the official not-for-profit entity that purchased the license are acceptable. The only caveat is that entities directly affiliated with an institution that is managed and operated as a for-profit with a separate tax status than the parent institution do not qualify. For example, for-profit online programs, endowment money management arms, startup incubators, would not qualify.\n\nYes, we realize that it is entirely possible that every single faculty, staff and student may want, need, and use a GitLab account. If that is the case, first of all, you are a rockstar campus and please reach out to us because we definitely want to talk! And secondly, if that is the case, you will run out of seats. We fully realize this. 
In the spirit of our GitLab values, we went with the simplest viable change ([MVC](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc)) for this first iteration. The last published enrollment of all students per semester is an easy number to access and document, so we choose this value to base the price on. Additionally, educational institutions fall into typical sized buckets with fairly standard ratio bands of faculty to staff to students. As use expands, we will revisit this in the next iteration and if you feel that this number is a limitation, please reach out and we can work with you in the meantime.\n\n## Is GitLab for Campuses right for my institution?\n\nIs the price worth it? We can help you determine if this subscription is right for your campus. We based the GitLab for Campuses offering on the idea that students remain free and that institutions only pay for administrative use. What does this mean? This means that the offering is based on the typical number of administrative or professional users that a campus within any given size bucket could be expected to have. You can think of this as a break-even price. For example, if I have a campus of X amount of students, I may have an Y amount of administrative users. If I were to pay for just these administrative individuals in a given year, I would pay Z. Z is your break-even price. The GitLab for Campuses model is well below the break-even price for a typical campus and provides you with top-tier features with your choice of deployment method. It is a win-win!\n\nSome campuses may not be the point where their Z makes sense for the GitLab for Campuses offering just yet. Maybe GitLab or DevOps is new to your campus... Maybe your campus has vendor lock on a different solution... Or maybe only a small department is using GitLab.... If you aren't at the level where the GitLab for Campuses offering makes sense, we still have a solution for you. 
We've created an Academic Discount of 20% off our list price to qualifying educational institutions. This way you can just pay for the seats you need.\n\n## What's next?\n2021 is going to be an exciting year for the GitLab for Education Program. We are making significant investments into bringing DevOps to educational institutions around the world! We are automating the entire application process, making it much easier and faster to both apply and renew the free licenses. We are also investing significantly in creating learning and development content for faculty, students, and staff.\n\nStay tuned for more case studies! Check our recent examples here from Dublin City University, the [British Geological Survey](/customers/bgs/), and the Square Kilometer Array, to learn more about how DevOps is transforming teaching, learning, and research.\n\nDid you miss the survey? Don't worry, we'll send out another one in the fall of 2021 and we also have a issue template for anyone in education to provide us with feedback. [Check it out here](https://gitlab.com/gitlab-com/marketing/community-relations/community-programs/education-program/programfeedback).\n\nAre you interested in applying for the free GitLab for Education Program? [Apply now](/solutions/education/join/).\n\nWould you like to talk to our Sales team about the GitLab for Campuses offering? 
[Contact Sales](/sales/).\n",[9,1339],{"slug":3625,"featured":6,"template":686},"gitlab-education-solutions","content:en-us:blog:gitlab-education-solutions.yml","Gitlab Education Solutions","en-us/blog/gitlab-education-solutions.yml","en-us/blog/gitlab-education-solutions",{"_path":3631,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3632,"content":3638,"config":3646,"_id":3648,"_type":14,"title":3649,"_source":16,"_file":3650,"_stem":3651,"_extension":19},"/en-us/blog/gitlab-for-designers",{"title":3633,"description":3634,"ogTitle":3633,"ogDescription":3634,"noIndex":6,"ogImage":3635,"ogUrl":3636,"ogSiteName":670,"ogType":671,"canonicalUrls":3636,"schema":3637},"Help us shape the future of design discussion in GitLab","We've identified the need for full integration of user experience design within the DevOps lifecycle, and would love your feedback on how to make that happen.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680008/Blog/Hero%20Images/design-discussion.jpg","https://about.gitlab.com/blog/gitlab-for-designers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Help us shape the future of design discussion in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarrah Vesselov\"}],\n        \"datePublished\": \"2018-11-08\",\n      }",{"title":3633,"description":3634,"authors":3639,"heroImage":3635,"date":3641,"body":3642,"category":299,"tags":3643},[3640],"Sarrah Vesselov","2018-11-08","\n\nAt GitLab, we do everything using, well, GitLab. Using our product as part of our workflow allows us to experience, firsthand, the limitations and frustrations that may prevent our users (and us) from being able to get work done quickly and efficiently. 
In the user experience (UX) department, we've found ourselves struggling with some important aspects of our day-to-day work – this is what we've found, and how we hope to address it:\n\n## Design discussions quickly become hard to follow\n\nDesign discussion happens inside of issues at GitLab. Typically, a designer will post a wireframe, mockup, or prototype within a comment on an issue to elicit feedback from others. The transparency is excellent: product managers, engineers, and designers can all come together to talk over the problem and the possible solutions. Problems creep in when conversations get too lengthy, hard to follow, and involve multiple iterations of a design. How can we make design discussion at GitLab more useful and accessible?\n\n## We need version control for design files\n\nWe use Sketch for our day-to-day design work. The UX department's Sketch files live within a [design repository](https://gitlab.com/gitlab-org/gitlab-design) to ensure that all designers have access to current patterns and solutions. However these files are not version controlled within the repository, so designers keep personal folders for work-in-progress designs. How can we version control our files within GitLab and eliminate the need to keep multiple versions of a particular design?\n\n## A competitive analysis of design platforms and applications\n\nTo start looking for solutions to these problems, we conducted a competitive analysis of the other platforms and applications out there tackling design creation, collaboration, and handoff. We wanted to know: What are other design teams doing to address these problems? Are there existing aspects of GitLab we can leverage to solve these problems? If not, what would an [MVC](/handbook/product/product-principles/#the-minimal-viable-change-mvc) look like to integrate designers more efficiently into GitLab?\n\n### Summary of findings\n\nToday's average user is tech savvy, with high expectations for interface usability. 
Products must be useful and easy to use for users with a wide range of backgrounds, experiences, and expectations. As a result, enterprise-level companies have invested heavily in building UX teams to produce beneficial experiences. These UX teams have distinct requirements for the toolsets they use. Design tools must be able to:\n\n* Improve UX consistency\n* Enable research and testing of designs with users\n* Clarify requirements\n* Facilitate collaboration between teams (Engineering, PM, UX)\n* Version control design files\n* Minimize duplication of work with an SSOT\n* Minimize context switching\n\nThe last requirement, minimize context switching, really stood out. Enterprise designers work on a variety of platforms. The market has exploded over the past decade, with a majority of designers moving from using desktop software to cloud-based platforms. Designers want and need a single-platform approach. They must have the ability to design, collaborate, and share their work with the rest of the organization within one platform.\n\nThis single-platform approach presents a unique opportunity for us. GitLab is the first single application built from the ground up for all stages of the DevOps lifecycle for Product, Development, QA, Security, and Operations teams to work concurrently on the same project. A significant missing piece of this lifecycle is UX design.\n\n### Areas of opportunity for GitLab:\n\n* Review and collaboration\n* Interaction design\n* Version control\n* Developer handoff\n* Design system management\n\nThe total market potential is over US $4 billion and growing. 
With no clear winners in the design tool space, there is a significant opportunity for an application that can successfully engage developers and design teams in the DevOps lifecycle.\n\nYou can view the [complete competitive analysis here](https://docs.google.com/document/d/12o6h6Fm7bAjhW5AK1r-PNhvn0QrQwZncorYNia12e3Q/edit?usp=sharing).\n\n## What's next?\n\nA logical place to start is by improving discussion within issues. Design proposals are available in issue descriptions, shared and discussed in comments, and it's not always clear which is the latest version. While we have the option to mark and [comment on specific image spots in the blob view and merge requests](https://docs.gitlab.com/ee/user/discussions/#image-discussions), the actual design collaboration happens much earlier in the process.\n\nOne idea is to make design artifacts a first-class citizen by linking to design assets in the side navigation of an issue. We could allow for commenting on images and propagate these comments in the sidebar for focused and cohesive discussion.\n\nWe want to know what you think! 
You can take a look at and comment on the [design artifacts discovery issue here](https://gitlab.com/gitlab-org/gitlab-ce/issues/53587).\n\n[Photo](https://www.pexels.com/photo/notes-clean-whiteboard-board-7067/) by [Startup Stock Photos](https://www.pexels.com/@startup-stock-photos) on Pexels.\n{: .note}\n",[749,728,3644,3645,9],"UX","design",{"slug":3647,"featured":6,"template":686},"gitlab-for-designers","content:en-us:blog:gitlab-for-designers.yml","Gitlab For Designers","en-us/blog/gitlab-for-designers.yml","en-us/blog/gitlab-for-designers",{"_path":3653,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3654,"content":3660,"config":3665,"_id":3667,"_type":14,"title":3668,"_source":16,"_file":3669,"_stem":3670,"_extension":19},"/en-us/blog/gitlab-for-project-management-one",{"title":3655,"description":3656,"ogTitle":3655,"ogDescription":3656,"noIndex":6,"ogImage":3657,"ogUrl":3658,"ogSiteName":670,"ogType":671,"canonicalUrls":3658,"schema":3659},"How our tool fosters collaborative project management","Our marketing team explains how we use GitLab to manage complex projects. 
Read how GitLab can improve your collaboration on projects.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680908/Blog/Hero%20Images/stickynotes.jpg","https://about.gitlab.com/blog/gitlab-for-project-management-one","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How our tool fosters collaborative project management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-12-06\",\n      }",{"title":3655,"description":3656,"authors":3661,"heroImage":3657,"date":3662,"body":3663,"category":791,"tags":3664},[2002],"2019-12-06","\n\n_While it is true that there are few non-technical roles left in today’s business environment, it is notable that even folks outside of engineering use GitLab technology for collaborative project management. In this first part of our two-part series we outline the problems of siloed communications and how GitLab is structured to solve that for developers and everyone else. In part two, we’ll take a deep dive into how we used GitLab to manage an integrated marketing campaign and how our product marketing team uses GitLab for complex project management._\n\nImagine you’re trying to launch a new, integrated campaign. This campaign has a central message (e.g., \"Everyone can contribute\") and it pulls in representatives from many different teams – like social media, blogs, and field marketing – to create the designs and content that make this campaign a reality. 
The campaign structure is built and you’re ready to go – but wait – you’re working in a silo where communication between teams is challenging and there are strict rules about how information is conveyed.\n\nMarketing programs manager [Jackie Gragnola](/company/team/#jgragnola) kicked off the “GitLab for Non-Tech & Project Management Use\" breakout session at [GitLab Contribute New Orleans](/events/gitlab-contribute/) with an icebreaker game that mirrors this very conundrum. Breakout group participants were assigned teams as they tried to rebuild a gumdrop structure, but with strict communication guidelines. One person could see the structure, and relay what the structure looks like to three runners, who then described the structure to one builder.\n\nNeedless to say, the inefficiencies mounted quickly.\n\n\"The problem was one person could use their eyes, one person could use their mouth, one person could use their ears,\" said [Joyce Tompsett](/company/team/#Tompsett), analyst relations manager at GitLab and an observer/reporter in this game. \"So, even though everybody had all the component pieces they were only allowed to use one function at a time and then there was no return communication allowed.\"\n\nThe “can’t see the whole picture” problem is a common one in every industry and the solution is to make collaboration painless. [Collaboration is one of our core values at GitLab](https://handbook.gitlab.com/handbook/values/#collaboration) and it is fundamental to how we run our business and how we designed our tool. To understand how GitLab can work outside of software development it’s helpful to understand the underpinnings.\n\n## How GitLab works\n\nDeveloping software is similar in concept to baking a layer cake. You need a really strong foundation to keep your cake upright, and each coating of frosting between the cake layers acts as the glue that holds it all together. 
The top layer of frosting makes sure that all of your layers stay in one place (and makes sure that the layer cake is looking like a cake).\n\n![layercake](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management/layercakev2.jpg){: .shadow.medium.center}\nA layer cake is a great analogy for how GitLab works as a project management tool.\n{: .note.text-center}\n\n\"The frosting between those layers is like webhooks or APIs; they’re actually the integrations that make the two pieces of software talk to each other,\" explains [JJ Cordz](/company/team/#jjcordz), senior marketing ops manager. \"Each task that's above the next one can get more complex because it's building off the foundation that you've already put into place.\"\n\nThe difference between the typical DevOps layer cake and the GitLab layer cake is that every activity or function fulfilled by a different layer of the cake (i.e., discrete piece of software) happens entirely within GitLab. In the GitLab layer cake, everything from project planning to execution allows teams to collaborate together within a single tool.\n\nOur description of the GitLab layer cake is actually how GitLab is structured today: With groups at the top, followed by epics, and projects that have issues, templates, etc. All of the layers can work together to build a fluid workflow, or they can be used independently.\n\n\"So all of those pieces together can actually standalone or you can put them all together and it makes a really awesome process in a workflow,\" says JJ. 
\"You can actually have lots of teams working together to get something massive done, but you've broken it down into little pieces.\"\n\n## Project management within GitLab\n\nIf you want to start thinking about getting \"something massive done\" within GitLab consider these basic steps:\n\n*   **Create a framework**: Before diving into a new project, a good project manager will first define what the ideal state is and will then build a framework for achieving this ideal state.\n*   **Assign directly responsible individuals (DRIs)**: The PM will assign DRIs to different components of the project. Each DRI is responsible for that particular component and is the person that you can follow-up with regarding that component throughout the project.\n*   **Templatize repeated tasks**: Keep things efficient with templates.\n*   **Set service level agreements (SLAs) at each handoff point**: Think about the due date and work backward to sort out how long different tasks should be taking.\n*   **Write rules of engagement and fallback instructions**\n*   **Define the feedback process**: Ensure that you have a place for people to ask questions, and make the room to iterate as you go along.\n\nWhat does this look like in the real world? Our marketing team built a project management structure within GitLab that allows multiple teams to collaborate within the [marketing group](https://gitlab.com/gitlab-com/marketing). Each team (e.g., [corporate marketing](https://gitlab.com/gitlab-com/marketing/corporate-marketing)) has their own project, where other groups and projects can live.\n\n[Epics](https://docs.gitlab.com/ee/user/group/epics/) – which represent projects that contain multiple issues – also live at the marketing group level rather than living within smaller team projects. The [epics live at the marketing group level](/handbook/marketing/#issues-milestones-and-epics) because oftentimes multiple marketing teams (e.g., corporate marketing, product marketing, etc.) 
will be tagged in different issues within a particular epic.\n\n[Efficiency](https://handbook.gitlab.com/handbook/values/#efficiency) is another one of our values at GitLab and the marketing team created templates within different marketing teams for repeat tasks to keep processes more uniform and efficient.\n\nWe also created a unified, global view that allows us to track the progress of various marketing projects. We have four labels: work in progress (wip), plan, review, and scheduled, that are assigned to a marketing issue that indicates the various stages. The labels allow [Todd Barr](/company/team/#tbarr), our chief marketing officer, and anyone else on the marketing team to see a global overview of various issues within marketing as they move from the idea to completion phase.\n\n![unifiedview](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management/labels.png){: .shadow.large.center}\nA global overview of all the activities happening in marketing, separated and labeled according to their current status.\n{: .note.text-center}\n\nThe marketing team uses two-tiers for our epics: the highest level is the ancestor (formerly called \"parent\") epic, and below that is the child epic. 
There can be multiple issues associated with the child epic, but an issue can only be associated with one epic.\n\n![epic-diagram](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management/parent-child-epics.png){: .shadow.large.center}\nHow the marketing team uses ancestor epics and child epics.\n{: .note.text-center}\n\nNow that you understand the basics of GitLab and project management within GitLab, watch the video on executing sophisticated and integrated marketing programs.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/tbg8KSyIWVg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAnd don’t miss the second part of this series where we put the spotlight on our internal successes using GitLab for project management.\n\nCover image by [Startaê Team](https://unsplash.com/@startaeteam) on [Unsplash](https://unsplash.com/s/photos/sticky-notes).\n{: .note}\n",[9,683,749],{"slug":3666,"featured":6,"template":686},"gitlab-for-project-management-one","content:en-us:blog:gitlab-for-project-management-one.yml","Gitlab For Project Management One","en-us/blog/gitlab-for-project-management-one.yml","en-us/blog/gitlab-for-project-management-one",{"_path":3672,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3673,"content":3679,"config":3685,"_id":3687,"_type":14,"title":3688,"_source":16,"_file":3689,"_stem":3690,"_extension":19},"/en-us/blog/gitlab-gdk-remote-development",{"title":3674,"description":3675,"ogTitle":3674,"ogDescription":3675,"noIndex":6,"ogImage":3676,"ogUrl":3677,"ogSiteName":670,"ogType":671,"canonicalUrls":3677,"schema":3678},"Contributor how-to: Remote Development workspaces and GitLab Developer Kit","This tutorial helps you get GDK working inside Remote Development workspaces to begin contributing to 
GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670563/Blog/Hero%20Images/cloudcomputing.jpg","https://about.gitlab.com/blog/gitlab-gdk-remote-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Contributor how-to: Remote Development workspaces and GitLab Developer Kit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Raimund Hook\"}],\n        \"datePublished\": \"2023-07-31\",\n      }",{"title":3674,"description":3675,"authors":3680,"heroImage":3676,"date":3682,"body":3683,"category":791,"tags":3684},[3681],"Raimund Hook","2023-07-31","\nOpen source is fundamental to GitLab. We believe that [everyone can contribute](https://about.gitlab.com/company/mission/#mission).\nTypically, we recommend that anyone contributing anything more than basic changes to GitLab run the [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit) (GDK). Because contributors can't always meet the GDK's resource demands, we're working to enable GDK inside the cloud-based GitLab Remote Development workspaces.\n\nIn this article, I'll explain how I used a Remote Development workspace running in my Kubernetes cluster to make working with the GDK faster and easier.\n\n## A preliminary note\nFirst, keep in mind that as of this writing the [Remote Development workspaces](https://about.gitlab.com/direction/create/ide/remote_development/) feature is still in Beta. My example here is therefore very much a proof of concept — and as such, it has some rough edges.\n\nBefore getting started, I followed the \"[Set up a workspace](https://docs.gitlab.com/ee/user/workspace/#set-up-a-workspace)\" prerequisites guide in the GitLab docs. 
For a more detailed set of instructions, see Senior Developer Evangelist Michael Friedrich's tutorial on [how to set up infrastructure for cloud development environments](https://about.gitlab.com/blog/set-up-infrastructure-for-cloud-development-environments/).\n\n## Getting started with workspaces\nTo start using workspaces, you will need a project configured with a `.devfile.yaml`. GitLab team members have curated [a number of example projects](https://gitlab.com/gitlab-org/remote-development/examples) you can review.\n\nInitially, I tried to do this with a fork of the GitLab project itself, but I ran into [some issues](https://gitlab.com/gitlab-org/gitlab/-/issues/414011) when the workspace begins cloning the repository.\n\nTo figure out what was causing my problems, I looked more closely at what happens behind the scenes when a workspace is created.\n\n## Behind the scenes with Remote Development workspaces\nWhen you create a new workspace, the following happens:\n1. The GitLab agent for Kubernetes creates a new namespace in your cluster. The agent dynamically generates a name for and assumes management of the namespace.\n1. Inside the namespace, a new deployment is created, specifying the container you chose in your `.devfile.yaml` as the image to use.\n1. This deployment is configured with some [init containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) that perform some actions:\n    1. Cloning the repository into `/project/${project_path}`.\n    1. Injecting the VS Code server binary into your container.\n1. Once those init containers are complete, your container starts and the workspace becomes available.\n\n## The clone problem\nWhen cloning a repository, `git` tends to do much of the work in memory. This can be a challenge on larger projects/repositories, as it can require significant amounts of RAM. When cloning the GitLab project, for instance, git consumes approximately 1.6GB of RAM. 
This number is only going to increase with time. Sure, strategies like [shallow clones](https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---depthltdepthgt) can help reduce this, but these are perhaps less suited to active use by a developer as they can increase the amount of time required to perform ongoing git operations.\n\nIn fact, creating a workspace using our `.devfile.yaml` in a fork of the GitLab project failed for this reason. The init container performing the clone is currently hard-limited to 128MiB of RAM, after which the memory management processes on the node kill the container.\n\nTo overcome this limitation, move the `.devfile.yaml` into the a fork of the root of the GDK repository. This project clones more quickly (and does so using fewer resources), so it's a  perfect starting point for running GDK itself. Another (bonus) advantage: You're then primed to contribute to the GDK itself, in addition to any of the other GitLab projects that the GDK clones.\n\n## Components of a GDK installation\nGDK clones the following projects from the GitLab 'family':\n* [GitLab](https://gitlab.com/gitlab-org/gitlab)\n* [Gitaly](https://gitlab.com/gitlab-org/gitaly)\n* [GitLab shell](https://gitlab.com/gitlab-org/gitlab-shell)\n\nThis allows you to work on any items in those directories as a part of your \"live\" installation.\n\n## Getting GDK installed and running in a workspace\nOnce I had a workspace up and running, my next step was to get GDK installed and running *in* that workspace. The GDK's documentation presents [several routes for doing this](https://gitlab.com/gitlab-org/gitlab-development-kit/#installation).\n\nA complete installation can take some time, as GDK needs to bootstrap itself and install a number of prerequisites. This is less than ideal in the context of a Remote Development workspace, as one of remote development's primary benefits is enabling access to a development environment rapidly. 
Requiring a user to bootstrap an environment that takes 50 minutes (or longer) doesn't help achieve this goal.\n\nTo combat this, I built a container image that effectively bootstraps and installs GDK, pre-building the GDK prerequisites and pre-seeding the database. This image and its associated tooling are currently [in review](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/3231).\n\n## Pre-building\nPre-building the container and running the bootstrap process on a scheduled basis allows us to perform that process once, without requiring the user to wait for something that can essentially be \"pre-canned\" for their use.\n\nOnce the workspace is running, we still need to \"reinstall\" the GDK environment with the latest version of our GitLab repository, but this step doesn't take quite as long as a complete bootstrap.\n\n## Generating a gdk.yml file\nTo work properly, GDK also requires a [`gdk.yml` file](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/configuration.md#gdkyml). This file tells GDK how to configure GitLab to return the correct URLs and other items. To get GDK running in Remote Development, Rails needs to return URLs in a certain scheme (otherwise your browser won't know where to connect). To help this along, we [inject an environment variable](https://gitlab.com/gitlab-org/gitlab/-/issues/415328) into the workspace container. This variable helps us determine the URL in use (which is dynamically generated for each workspace).\n\nWe [now have a script](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/support/gitlab-remote-dev/remote-dev-gdk-bootstrap.sh?ref_type=heads) in GDK that will generate your `gdk.yml` file based on your workspace.\n\n## Creating our devfile\nThe contents of my `.devfile.yaml` looks like this:\n\n```yaml\nschemaVersion: 2.2.0\ncomponents:\n  - name: tooling-container\n    attributes:\n      gl/inject-editor: true\n    container:\n      # NB! 
This image is only in use until https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/3231 is merged!\n      image: registry.gitlab.com/gitlab-org/gitlab-development-kit/gitlab-remote-workspace:stingrayza-gdk-remote-dev-add-container\n      memoryRequest: 10240M\n      memoryLimit: 16384M\n      cpuRequest: 2000m\n      cpuLimit: 6000m\n      endpoints:\n        - name: ssh-2222\n          targetPort: 2222\n        - name: gdk-3000\n          targetPort: 3000\n        - name: docs-3005\n          targetPort: 3005\n        - name: pages-3010\n          targetPort: 3010\n        - name: webpack-3808\n          targetPort: 3808\n        - name: devops-5000\n          targetPort: 5000\n        - name: jaeger-5778\n          targetPort: 5778\n        - name: objects-9000\n          targetPort: 9000\n        - name: shell-9122\n          targetPort: 9122\n```\n\nThis definition comes straight out of the [Workspace docs](https://docs.gitlab.com/ee/user/workspace/#devfile), and opens a number of ports that GDK uses. (For now, I've only tested the port `gdk-3000`, which is the the link to our instance of GDK.)\n\n## From Workspace to GDK\nOnce we have a project with a `.devfile.yaml`, our final step is to [create a new workspace](https://docs.gitlab.com/ee/user/workspace/#create-a-workspace).\n\nAs a part of this step, your cluster will pull the image as defined in the `.devfile.yaml` and start it up. For the GDK image we pre-built, this can take a few minutes.\n\nOnce the workspace is ready, the last step is to follow the link from the UI to connect to the workspace. This will open up a familiar VS Code IDE, with our GDK fork checked out.\n\nBut wait, where's GDK?\n\nWell, the pre-build did most of the work for us, but we still need to take a few final steps before we can claim that GDK is up and running. 
These have been built into a script we can run from the integrated terminal within the workspace.\n\nTo open a terminal, we can click on the VS Code Hamburger menu (top left), navigate to `Terminal` and select `New Terminal`.\n\nNow we execute the following script, which completes the setup and copies a couple of files over from the pre-built folders:\n\n```shell\nsupport/gitlab-remote-dev/remote-dev-gdk-bootstrap.sh\n```\n\nThis can take up to 15 minutes, but when it's done it should output the magic words — something like the following (note the 3000 in the URL; we specified that in the `.devfile.yaml` earlier):\n\n```shell\nSuccess! You can access your GDK here: https://3000-workspace-62637-2083197-apglwp.workspace.my-workspace.example.net/\n```\n\n## Connect to your GDK\nFollow the link as displayed using Cmd-click or Ctrl-click. After a couple of moments (GDK boot time), you should reach a familiar GitLab login screen.\n\nCongratulations! GDK is now running inside your Remote Development workspace.\n\nTo log in, type `gdk` in your terminal and you'll see the default admin credentials displayed near the bottom:\n\n```shell\n# Development admin account: xxxx / xxxx\n\nFor more information about GitLab development see\nhttps://docs.gitlab.com/ee/development/index.html.\n```\n\nLog into your GDK with the default credentials, change the admin user password, and you're all set!\n\n## Demo of workspace launch\nHere's a demo of launching a workspace in my personal cluster:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/iXq1NnTjnX0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How to contribute to GitLab\nIn this article I explained how to get GDK up and running in Remote Development workspaces. 
This is not without its challenges, but the end result should mean that contributing to GitLab (especially in resource-constrained environments) is quicker and easier.\n\nDo you want to contribute to GitLab? Come and join in the conversation in the `#contribute` channel on GitLab's [Discord](https://discord.gg/gitlab), or just pop in and say \"hello.\"\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._",[978,9,683,1041,1515,682],{"slug":3686,"featured":6,"template":686},"gitlab-gdk-remote-development","content:en-us:blog:gitlab-gdk-remote-development.yml","Gitlab Gdk Remote Development","en-us/blog/gitlab-gdk-remote-development.yml","en-us/blog/gitlab-gdk-remote-development",{"_path":3692,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3693,"content":3699,"config":3707,"_id":3709,"_type":14,"title":3710,"_source":16,"_file":3711,"_stem":3712,"_extension":19},"/en-us/blog/gitlab-hashicorp-terraform-vault-pt-1",{"title":3694,"description":3695,"ogTitle":3694,"ogDescription":3695,"noIndex":6,"ogImage":3696,"ogUrl":3697,"ogSiteName":670,"ogType":671,"canonicalUrls":3697,"schema":3698},"GitLab and HashiCorp streamline delivery workflows","Discover how to leverage CI/CD for your infrastructure scripts with Terraform and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670238/Blog/Hero%20Images/gitlab-terraform-pipelines.jpg","https://about.gitlab.com/blog/gitlab-hashicorp-terraform-vault-pt-1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": 
\"Article\",\n        \"headline\": \"GitLab and HashiCorp: Providing application and infrastructure delivery workflows\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kelly Hair\"},{\"@type\":\"Person\",\"name\":\"Anthony Davanzo\"}],\n        \"datePublished\": \"2019-09-17\",\n      }",{"title":3700,"description":3695,"authors":3701,"heroImage":3696,"date":3704,"body":3705,"category":679,"tags":3706},"GitLab and HashiCorp: Providing application and infrastructure delivery workflows",[3702,3703],"Kelly Hair","Anthony Davanzo","2019-09-17","\nA growing number of teams are becoming more and more invested in continually improving the business through iterative development. Adopting the culture of DevOps isn’t necessarily confined to software development itself, but is equally applicable to ITOps, System Admins, and other infrastructure teams as well. Just as a proper CI/CD workflow is the foundation of today’s application delivery, a similar automated workflow is essential for managing the delivery of infrastructure as well.\n\nAs developers try to become more agile in building, packing, and testing their applications, having the right CI/CD tool that is flexible to other automation use cases is critical. GitLab has gone into great detail about their [flexible CI/CD capabilities here](https://docs.gitlab.com/ee/ci/introduction/index.html#how-gitlab-cicd-works). What’s sometimes overlooked is implementing the proper CI/CD process for the underlying infrastructure that these applications rely on. In addition to application delivery, organizations need to consider what their infrastructure delivery process looks like. GitLab and HashiCorp have partnered to create a multi-blog series on how to combine the application delivery workflow with the infrastructure delivery workflow. 
In this part we will discuss a high-level overview of the solutions that we will dive deeper into in Part 2.\n\n## Leveraging HashiCorp Terraform for CI/CD Pipelines\n\n[HashiCorp Terraform](https://www.terraform.io/) is an open source tool for provisioning infrastructure as code. Users define infrastructure in HashiCorp Configuration Language (HCL) configuration files, Terraform reads those configurations, offers a speculative plan of what it will create, and then users confirm and apply those changes. Terraform keeps track of what infrastructure is provisioned in a state file.\n\nThe recently announced Terraform Cloud application provides users with additional automation and collaboration capabilities on top of Terraform, such as remotely managing and version that state file, executing Terraform runs (plan/apply) remotely, and allowing teams to comment and collaborate on Terraform. By remotely managing state files, Terraform Cloud empowers teams to work more quickly and safely in parallel without concerns of losing the file or overwriting each other's changes. These features are especially helpful for users implementing CI/CD pipelines because they allow users to interact with Terraform via webhooks/API instead of having Terraform run on a local machine.\n\nMost users will store their configuration files in a VCS (Version Control System) like GitLab and connect that VCS to Terraform Cloud. That connection allows users to borrow best practices from software engineering to version and iterate on infrastructure as code, using VCS and Terraform Cloud as a provisioning pipeline for infrastructure. Terraform will automatically run a plan upon changes to configuration files in a VCS. This plan can be reviewed by the team for safety and accuracy in the Terraform UI, then it can be applied to provision the specified infrastructure. 
Terraform Cloud can also be configured to automatically apply those changes.\n\nTerraform Cloud also includes a Governance upgrade, which provides access to the [Sentinel](https://www.hashicorp.com/sentinel) policy as code framework.  This framework allows users to define fine-grain rules and policies for their infrastructure that are automatically enforced before that infrastructure is provisioned. This allows users to work with the speed and efficiency they want in their continuous integration/delivery pipelines, while still ensuring that best practices are being implemented.\n\n### Future iterations\n\nIt is also worth discussing current work in progress with GitLab and Vault. Vault from Hashicorp secures, stores, and tightly controls access to tokens, passwords, certificates, API keys, and other secrets that services depend on. In efforts to improve [Variables and secrets management in GitLab CI/CD](https://gitlab.com/groups/gitlab-org/-/epics/816) we’re working with HashiCorp to provide a [first-class integration with Vault](https://gitlab.com/gitlab-org/gitlab-ce/issues/61053) sometime in the future.\n\n## Next steps\n\nAs a follow up, we will soon be posting a blog on the technical details of _how_ to build a Terraform pipeline in GitLab CI/CD.\n\nIn meantime, check out how [WagLabs reduced their release process from 40 minutes to just six](/blog/wag-labs-blog-post/), using Terraform and GitLab CI/CD!\n\n### About the authors\n\n_[Anthony Davanzo](https://www.linkedin.com/in/anthonydavanzo/) is the product marketing manager for Terraform Cloud at HashiCorp. In this role he focuses on bringing Terraform Cloud to market, hoping to drive adoption and spread awareness of the tool. 
His prior role as the technical product marketing manager for Terraform helps with deep domain knowledge and before HashiCorp, he was a product marketing manager at Cloudflare._\n\n_[Kelly Hair](/company/team/#khair1) is a solutions architect at GitLab._\n\nPhoto by [Saad Salim](https://unsplash.com/@saadx?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[109,1041,9,231,3173],{"slug":3708,"featured":6,"template":686},"gitlab-hashicorp-terraform-vault-pt-1","content:en-us:blog:gitlab-hashicorp-terraform-vault-pt-1.yml","Gitlab Hashicorp Terraform Vault Pt 1","en-us/blog/gitlab-hashicorp-terraform-vault-pt-1.yml","en-us/blog/gitlab-hashicorp-terraform-vault-pt-1",{"_path":3714,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3715,"content":3720,"config":3725,"_id":3727,"_type":14,"title":3728,"_source":16,"_file":3729,"_stem":3730,"_extension":19},"/en-us/blog/gitlab-helm-package-registry",{"title":3716,"description":3717,"ogTitle":3716,"ogDescription":3717,"noIndex":6,"ogImage":928,"ogUrl":3718,"ogSiteName":670,"ogType":671,"canonicalUrls":3718,"schema":3719},"Introducing the GitLab Helm Package Registry","Develop and deploy cloud native applications with a built-in Helm registry.","https://about.gitlab.com/blog/gitlab-helm-package-registry","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing the GitLab Helm Package Registry\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2021-07-26\",\n      }",{"title":3716,"description":3717,"authors":3721,"heroImage":928,"date":3722,"body":3723,"category":726,"tags":3724},[723],"2021-07-26","\n\nCloud native application architectures use containerization, microservices, and Kubernetes to run reliably at cloud-scale. 
With a built-in container registry and Kubernetes integration, GitLab is the best way to develop and deploy cloud native applications. [GitLab version 14.1](/releases/2021/07/22/gitlab-14-1-released/) also includes a Helm registry, which allows users to publish, install, and share Helm charts and packages from within our single application for the entire DevOps lifecycle.\n\n### What is Helm?\n\nHelm is a package manager for Kubernetes. A Chart is a Helm package that contains the resource definitions required to run an application inside a Kubernetes cluster. Helm allows you to manage complex applications by storing the application definition in a chart that can be versioned, shared, and collaborated on.\n\n### The differences between Helm Registry and Git\n\nWhy not simply store your Helm charts in a Git repository? After all, charts are YAML files that can be stored, versioned, and collaborated on like code.\n\nFor small projects and simple applications, it can be convenient to store the Helm chart in the same Git repository as the application code. However, this method starts to become unruly as the code scales. Applying this model with microservices architecture means you'd have many different charts spread out across many different repositories. Cluster-wide upgrades would certainly be a challenge. And sharing charts with other teams would require you to also grant permission to the code repository.\n\n### Comparing Helm registry and container registry\n\nAnother option for storing Helm charts is to use an OCI registry, like the GitLab Container Registry. However, this feature is new to Helm 3 and requires running Helm in experimental mode. 
Many organizations, especially those in highly regulated environments, prefer not to expose themselves to the additional risk of an experimental feature.\n\n### A built-in, dedicated Helm registry\n\nA Helm registry offers a centralized repository to store and share charts so large organizations can manage many complex applications in a controlled manner. The main benefits of having a dedicated registry are the security, efficiency, and reliability.\n\nWhen it comes to security, having all of the charts in one central location means they can be [systematically scanned for vulnerabilities](https://docs.gitlab.com/ee/user/application_security/sast/#supported-languages-and-frameworks). This is much more difficult to manage if your charts are stored in multiple locations. Similarly, user account and permission management is much easier to manage from a single location.\n\nA central registry also makes it much easier to distribute charts throughout your organization. Large organizations will often have a center of excellence that is responsible for creating, maintaining, and distributing charts to many different teams throughout the organization. Enabling a safe way to share charts and control access is critical.\n\nGitLab users can host all Helm charts from one central project, allowing users to control user access with SSO/SAML and authorization with deploy tokens, job tokens, or personal access tokens. Not to mention, the GitLab.com Package stage is 99.95% available.\n\n### How to get started\n\nThe new Helm Registry is currently at \"viable\" maturity. We do not recommended using it for production but it can be used for testing and planning. 
Visit the [Helm Repository docs](https://docs.gitlab.com/ee/user/packages/helm_repository/) for step-by-step commands to authenticate the registry and publish and install packages.\n\n### Contribute to the Helm Registry\n\nThe first iteration of the Helm registry was contributed to GitLab by community member [Mathieu Parent](https://gitlab.com/sathieu). We'd love your input and feedback and we continue to improve and mature the Helm registry capabilities. This [GitLab Epic outlines the path to make the Helm chart registry complete](https://gitlab.com/groups/gitlab-org/-/epics/6366). Comment in the epic and associated issues with your thoughts and feedback. As always, [code contributions](/community/contribute/development/) are welcome.\n",[749,9,916,1477],{"slug":3726,"featured":6,"template":686},"gitlab-helm-package-registry","content:en-us:blog:gitlab-helm-package-registry.yml","Gitlab Helm Package Registry","en-us/blog/gitlab-helm-package-registry.yml","en-us/blog/gitlab-helm-package-registry",{"_path":3732,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3733,"content":3739,"config":3744,"_id":3746,"_type":14,"title":3747,"_source":16,"_file":3748,"_stem":3749,"_extension":19},"/en-us/blog/gitlab-hero-devops-platform",{"title":3734,"description":3735,"ogTitle":3734,"ogDescription":3735,"noIndex":6,"ogImage":3736,"ogUrl":3737,"ogSiteName":670,"ogType":671,"canonicalUrls":3737,"schema":3738},"How a GitLab engineer changed the future of DevOps","When Kamil Trzciński suggested we integrate GitLab version control and GitLab CI one into a single product, GitLab's pioneering DevOps Platform was born.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681121/Blog/Hero%20Images/whatisgitlabflow.jpg","https://about.gitlab.com/blog/gitlab-hero-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How a GitLab engineer changed the future of DevOps\",\n        
\"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2020-10-29\",\n      }",{"title":3734,"description":3735,"authors":3740,"heroImage":3736,"date":3741,"body":3742,"category":726,"tags":3743},[1609],"2020-10-29","\n\nJust recently, Gartner recognized [DevOps Value Stream Delivery](/solutions/value-stream-management/) Platforms as an emerging category in the software marketplace by publishing the new [Market Guide for DevOps Value Stream Delivery Platforms](https://page.gitlab.com/resources-report-gartner-market-guide-vsdp.html) (what we're calling a DevOps Platform). The Gartner report may not include the name \"Kamil Trzciński,\" but I want to recognize his contributions to this DevOps Platform category. If it weren't for his idea, we wouldn't have [launched GitLab as an all-in-one, single DevOps application](/blog/gitlab-master-plan/). It's a product that changed how engineers build software.\n\n**[[Learn more about our journey to the DevOps Platform](/blog/the-journey-to-a-devops-platform/)]**\n\nIt all started in 2015 with a GitLab runner that was built by one of the contributors from the wider community, [Kamil Trzciński](/company/team/#ayufan), who is now a distinguished engineer, Ops and Enablement, at GitLab. He wrote a runner that was faster, easier to run in parallel, easier to install, and easier to contribute to. We liked his runner so much that we deprecated ours to use his, and asked him to join our engineering team.\n\nAt that time, GitLab had two products: [GitLab Source Code Management](/solutions/source-code-management/) (SCM) and [GitLab Continuous Integration](/solutions/continuous-integration/) (CI). We were a DevOps company, but one with two key products that worked well together with some overlaps in code. 
Then Kamil made a suggestion that changed our company and has now defined a category: \"Why don't you combine the two to make GitLab a single application?\"\n\n[Dmitriy Zaporozhets](/company/team/#dzaporozhets), GitLab co-founder, thought there was no need to do it because the products were already perfectly integrated. And my gut reaction was no. Many of our customers were already building their own, DIY DevOps platforms with multiple tools. Combining GitLab SCM and GitLab CI would mean they got two tools where they expected only one. Our customers didn't seem to want an all-in-one tool, so why would we build it?\n\nBut as Kamil pointed out, there is a considerable amount of overlap between GitLab SCM and GitLab CI, and our engineers and users were spending a lot of development time and effort in managing functions and libraries that appeared in both technologies. In the end, we realized that it actually made a lot of engineering sense to build an all-in-one [DevOps platform](/solutions/devops-platform/). At first, our customers weren't sure about it – some even asked us to turn the CI function off in GitLab SCM because their engineers started using that over their official CI solution. But once we explained how much more efficient this made their application building efforts, they were sold. GitLab all-in-one meant one data store, fewer clicks, less context, and more efficiency in their application development processes. Kamil's idea was brilliant. Our developers were able to save development effort and didn't have to hop around between tools, same with the developers and operators who use GitLab to build their applications.\n\nWe wouldn't be where we are today if we didn't welcome the contributions of everyone in our globally distributed, open source software community. 
Just think, within one year, Kamil went from being a GitLab contributor who wanted to learn Go, to building a GitLab runner that blew us away, to redefining the **entire business strategy for our company**. It goes to show that companies are smarter when everyone can contribute.\n\nWatch the video below to hear Kamil describe how he came to join GitLab and made a proposal that went on to define the DevOps Platform category.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/CiJOTlU3wWs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n_Gartner, Market Guide for DevOps Value Stream Delivery Platforms, Manjunath Bhat, Hassan Ennaciri, Chris Saunderson, Daniel Betts, Thomas Murphy, Joachim Herschmann, 28 September 2020_\n\n**[[Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)]**\n\n_Gartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner's research organization and should not be construed as statements of fact. 
Gartner disclaims all warranties, expressed or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose._\n\nCover image by [Fabio Bracht](https://unsplash.com/@bracht?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/_z0DiiaIhB4)\n{: .note}\n",[9,267,728],{"slug":3745,"featured":6,"template":686},"gitlab-hero-devops-platform","content:en-us:blog:gitlab-hero-devops-platform.yml","Gitlab Hero Devops Platform","en-us/blog/gitlab-hero-devops-platform.yml","en-us/blog/gitlab-hero-devops-platform",{"_path":3751,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3752,"content":3757,"config":3763,"_id":3765,"_type":14,"title":3766,"_source":16,"_file":3767,"_stem":3768,"_extension":19},"/en-us/blog/gitlab-identified-by-gartner-as-eapt-visionary",{"title":3753,"description":3754,"ogTitle":3753,"ogDescription":3754,"noIndex":6,"ogImage":717,"ogUrl":3755,"ogSiteName":670,"ogType":671,"canonicalUrls":3755,"schema":3756},"GitLab earns visionary status in Gartner agile planning","We're happy to announce GitLab has been named a 'Visionary' in Gartner's EAPT Magic Quadrant.","https://about.gitlab.com/blog/gitlab-identified-by-gartner-as-eapt-visionary","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab named a 'Visionary' in 2019 Gartner Enterprise Agile Planning Tool Magic Quadrant\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jeremiah\"}],\n        \"datePublished\": \"2019-05-22\",\n      }",{"title":3758,"description":3754,"authors":3759,"heroImage":717,"date":3760,"body":3761,"category":299,"tags":3762},"GitLab named a 'Visionary' in 2019 Gartner Enterprise Agile Planning Tool Magic Quadrant",[1198],"2019-05-22","\n\nGartner recently named [GitLab a ‘Visionary’](/analysts/gartner-eapt21/) in their Magic Quadrant research into Enterprise Agile 
Planning Tools. We believe that planning and delivery must be closely linked to enable product and project teams to streamline and accelerate delivery. In many organizations, disconnected tools create organizational islands, preventing teams from collaborating, sharing, and learning. Our vision for Concurrent DevOps is to enable teams to:\n\n- Bridge the gaps between PMs, Developers, Ops, and Security.\n- Build and manage their epics and roadmaps.\n- Prioritize work and organize sprints and Kanban boards to track the development and delivery of value to customers.\n\n## Everyone can contribute\n\nOur vision is to make it simple, easy, and fast for people to contribute and deliver value to their users. We believe that a [single application](/handbook/product/single-application/), preconfigured to work by default across the DevOps lifecycle, will enable faster cycle time, delivering innovation and value.\n\n## Easier workflows, increasing collaboration and productivity\n\nEnterprise Agile and Planning are critical activities that often determine the overall success of a project. Teams must work on the right things at the right time, and unless your planning processes are linked to your delivery actions, the potential for a disconnect is remarkably high.\n\nAt Hemmersbach, using GitLab helped them decrease the time from planning to production by 6.5 days. Working in a single environment, they are also achieving 60 builds per day where previously they were performing a single daily build.\n\n>“GitLab is the one tool that connects our whole team. You always see GitLab open and everything is based on GitLab. GitLab is the backbone of our software development.”  – Alexander Schmid, Head of Software Development, Hemmersbach\n\nGitLab solves the disconnect by enabling Enterprise Agile Planning within the same application that is used to manage the development and delivery.  
Now, [Product Managers and Project Managers](/solutions/agile-delivery/) can groom their backlog and epics, build their roadmaps, and plan sprints without losing touch with the actual development and delivery flow. Kanban boards provide a visual and interactive way to manage the status and flow of issues through delivery.\n\n![burndown](https://about.gitlab.com/images/home/burndown-chart.png){: .shadow.medium.center}\n\n[Value Stream Management](/solutions/value-stream-management/) offers insight into planning and delivering projects so that teams can find and remove bottlenecks from their value stream.\n\nDownload the report and learn more about why Gartner named GitLab an Enterprise Agile Planning 'Visionary.'\n\n[Download the full report](/analysts/gartner-eapt21/)\n{: .alert .alert-gitlab-purple .text-center}\n\nGartner, Magic Quadrant for Enterprise Agile Planning Tools, 18 April 2019, Keith Mann, Mike West, Thomas Murphy, Nathan Wilson\n{: .note}\n\nGartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s research organization and should not be construed as statements of fact. 
Gartner disclaims all warranties, express or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose.\n{: .note}\n\nImage by \u003Ca href=\"https://pixabay.com/users/pisauikan-4552082/?utm_source=link-attribution&amp;utm_medium=referral&amp;utm_campaign=image&amp;utm_content=2682641\">pisauikan\u003C/a> from \u003Ca href=\"https://pixabay.com/?utm_source=link-attribution&amp;utm_medium=referral&amp;utm_campaign=image&amp;utm_content=2682641\">Pixabay\u003C/a>\n{: .note}\n",[855,9,728,726],{"slug":3764,"featured":6,"template":686},"gitlab-identified-by-gartner-as-eapt-visionary","content:en-us:blog:gitlab-identified-by-gartner-as-eapt-visionary.yml","Gitlab Identified By Gartner As Eapt Visionary","en-us/blog/gitlab-identified-by-gartner-as-eapt-visionary.yml","en-us/blog/gitlab-identified-by-gartner-as-eapt-visionary",{"_path":3770,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3771,"content":3777,"config":3782,"_id":3784,"_type":14,"title":3785,"_source":16,"_file":3786,"_stem":3787,"_extension":19},"/en-us/blog/gitlab-importers",{"title":3772,"description":3773,"ogTitle":3772,"ogDescription":3773,"noIndex":6,"ogImage":3774,"ogUrl":3775,"ogSiteName":670,"ogType":671,"canonicalUrls":3775,"schema":3776},"How to migrate data to GitLab using main importers","Learn about the capabilities of main importers, which are used to import data from external tools and from other GitLab instances.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679170/Blog/Hero%20Images/migration-data.jpg","https://about.gitlab.com/blog/gitlab-importers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate data to GitLab using main importers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-02-13\",\n      
}",{"title":3772,"description":3773,"authors":3778,"heroImage":3774,"date":3779,"body":3780,"category":791,"tags":3781},[2120],"2023-02-13","\n\nA typical organization looking to adopt GitLab already uses many other tools. Artifacts such as code, build pipelines, issues, and epics will already exist and be changed daily. A seamless transition of work in progress is, therefore, critically important when importing data. GitLab importers aim to make this process easy and reliable, ensuring data is imported quickly and with maximum care.\n\nAt GitLab, a dedicated development team, named group:import, creates a seamless experience when importing data into GitLab or from one GitLab instance to another. This team continuously develops and improves the importing experience and keeps our importers up to date with new features and capabilities.\n\n## Migrate groups by direct transfer\n\nUsing group migration, you can import groups from one GitLab instance to another instance. The most common use case is to import groups from self-managed GitLab instances to GitLab.com (GitLab SaaS). With the group migration, you can migrate many groups in a single click.\n\n### Which items are imported?\n\nThe group migration imports the entire group structure, including all the sub groups and projects in them. Currently, to import projects as part of the group migration on self-managed GitLab, the administrator needs to enable the feature flag named `bulk_import_projects`. On GitLab.com, our SaaS offering, migration of both groups and projects is available. More information can be found in our [documentation](https://docs.gitlab.com/ee/user/group/import/#migrate-groups-by-direct-transfer-recommended).\n\nThe team continuously adds objects to the migration, but not all group items are imported. The docs cover the [items that are imported](https://docs.gitlab.com/ee/user/group/import/#migrated-group-items). 
\n\n### How can groups be imported?\n\nIt is very simple to import groups between two instances. Here are the steps: \n\n- Create a new group or subgroup in the designated instance \n- Select \"Import group\" \n- Connect to the remote instance with your [personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)\n- Select the source groups you want to import \n- Click \"Import xyz groups\"\n\n![bulk_imports_v14_1](https://about.gitlab.com/images/blogimages/2022-11-15-gitlab-importers/bulk_imports_v14_1.png)\n\n## File-based import/export (the previously used method)\n\nGroup migration is the preferred method to migrate content from one GitLab instance to another, as it automates the process and you can import many groups in a single click. However, for some use cases, such as air-gapped networks when you don't have network connection between the two instances, or when you have environments with limited connectivity, the group migration won't help because it requires connection between the two instances. File-based export/import for [groups](https://docs.gitlab.com/ee/user/group/settings/import_export.html) and [projects](https://docs.gitlab.com/ee/user/project/settings/import_export.html) can be used when there is no connectivity between the instances. \n\nFile-based export/import is a manual process and requires a few steps in order to migrate each group or project. The file-based import/export is available from the UI and in the API. The team plans to disable it by a feature flag soon to encourage users to use group migration. However, you will be able to enable the feature flag in your instance if your use case requires the file-based import/export. 
More info can be found in this [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/363406).\n\n## Import projects from external tools  \n\nGitLab has built-in support for import projects from [a variety of tools](https://docs.gitlab.com/ee/user/project/import/).\n\nThe GitHub importer is the most common importer and, therefore, the team invests a lot of effort to add more migrated components. GitLab and GitHub have different structure and architecture, so sometimes it is tricky to import objects from GitHub when the migrated components are implemented differently in GitLab. So the team needs to find creative ways to map some of the features or configurations. This is an example [epic](https://gitlab.com/groups/gitlab-org/-/epics/8585 ) with a proposal to map rules for protected branches when migrating GitHub protected rules. \n\n\n### What can be imported from GitHub to GitLab?\n\n- Repository description\n- Git repository data\n- Branch protection rules\n- Issues\n- Pull requests\n- Wiki pages\n- Milestones\n- Labels\n- Pull request review comments\n- Regular issue and pull request comments\n- Attachments for\n    - Release notes\n    - Comments and notes\n    - Issue description\n    - Merge Request description\n- Git Large File Storage (LFS) objects\n- Pull request reviews \n- Pull request “merged by” information \n- Pull request comments replies in discussions \n- Diff notes suggestions \n- Release note descriptions\n\nHere is a [full list of imported data](https://docs.gitlab.com/ee/user/project/import/github.html#imported-data).\n\nRead what's next in our [GitHub Epic](https://gitlab.com/groups/gitlab-org/-/epics/2984). \n\n### Repository by URL\n\nAn alternative way to import external projects is the Repository by URL option. 
You can import any Git repository through HTTP from the *Import Project* page, by choosing \"Repository by URL\".\n\nTo learn more about the Importer direction, roadmap, etc., refer to [Category Direction - Importers](/direction/manage/import_and_integrate/importers/).\n\n_Cover image by [Conny Schneider](https://unsplash.com/@choys_?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/data-migration?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[978,9,1040],{"slug":3783,"featured":6,"template":686},"gitlab-importers","content:en-us:blog:gitlab-importers.yml","Gitlab Importers","en-us/blog/gitlab-importers.yml","en-us/blog/gitlab-importers",{"_path":3789,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3790,"content":3795,"config":3800,"_id":3802,"_type":14,"title":3803,"_source":16,"_file":3804,"_stem":3805,"_extension":19},"/en-us/blog/gitlab-inc-takes-the-devops-platform-public",{"title":3791,"description":3792,"ogTitle":3791,"ogDescription":3792,"noIndex":6,"ogImage":928,"ogUrl":3793,"ogSiteName":670,"ogType":671,"canonicalUrls":3793,"schema":3794},"GitLab Inc. takes The DevOps Platform public","Today is the day GitLab Inc. takes The DevOps Platform public.","https://about.gitlab.com/blog/gitlab-inc-takes-the-devops-platform-public","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Inc. takes The DevOps Platform public\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2021-10-14\",\n      }",{"title":3791,"description":3792,"authors":3796,"heroImage":928,"date":3797,"body":3798,"category":769,"tags":3799},[1609],"2021-10-14","\nToday, GitLab Inc. announced the next milestone in our journey as we become a publicly traded company on the Nasdaq Global Market (NASDAQ: GTLB). 
GitLab was the first company to publicly live stream the entire end-to-end listing day at Nasdaq. \n\nIn a world where software defines the speed of innovation, every company must become a software company or they’ll be disrupted by a software company. We believe that GitLab, the DevOps Platform, helps companies to deliver software faster and more efficiently, while strengthening security and compliance. And it all happens inside our single platform where engineering, security, and operations teams can collaborate together. \n\nIn my [Founder’s Letter](#foundersletter), which you can read below, I told GitLab’s origin story. GitLab did not start in a tech incubator, garage, or Bay Area apartment. In 2011, my co-founder, Dmitriy Zaporozhets, created GitLab from his house in Ukraine. In 2012, I discovered GitLab from my home in the Netherlands on a tech news site. I thought that it was natural that a collaboration tool for developers was open source so people could contribute to it. As a Ruby developer, I was impressed by GitLab’s code quality, especially since it absorbed more than 300 contributions in the first year. In 2013, Dmitriy tweeted that he would like to work on GitLab full-time. After reading that tweet, I approached him, and we partnered so he could work on GitLab full-time. We incorporated GitLab Inc. in 2014 and applied to Y Combinator, a technology accelerator in Silicon Valley. In 2015, we participated in their program, and this greatly accelerated our business.\n\nTo ensure the quality of the GitLab application, Dmitriy built a second application, GitLab CI, to automatically test our code. In 2015, Kamil Trzciński, a member of the wider community, contributed a better version of the GitLab CI application so that it could run jobs in parallel. Dmitriy and I quickly made this new Runner the default version, and Kamil ended up joining the company. Kamil proposed integrating the two applications, which Dimitriy and I initially disagreed with. 
Thankfully, Kamil persisted in arguing for combining GitLab and GitLab CI into a single application. Dmitriy and I came around to Kamil’s point of view and the results were far better than anyone expected. The single application was easier to understand, faster to use, and enabled collaboration across functions. We had invented what we believed to be the first true DevOps platform and proceeded to build it out. \n\nToday, we believe that GitLab is the leading DevOps platform with an estimated 30 million registered users. GitLab's mission is to ensure that everyone can contribute. When everyone can contribute, users become contributors, and we greatly increase the rate of innovation. \n\n“GitLab also has more than 2,600 contributors in its open source community, which it lists as a competitive strength” - Stephanie Condon, ZDNet*\n\nWe are making progress toward our mission by elevating others through knowledge sharing, job access, and our software platform.\n\nGitLab’s values and underlying operational principles are core to our past, present, and future success. Most companies regress to the mean and slow down over time. We plan to maintain our startup ethos by continuing to do the following:\n\n- Reinforcing our values\n- Making quick, informed decisions\n- Designating a directly responsible individual (DRI) to own decision making for a workstream or initiative\n- Organizing informal communications\n- Challenging conventions and using boring solutions\n- Having a bias for action\n- Remembering we are an organization, not a family\n- Having time based releases\n- Supporting individual innovation through coaches and incubation\n- Dogfooding\n\nWe believe our approach has an impact on not only our business, but the industry as a whole. And we are not the only ones. 
\n\n“There are few companies that have had as positive an impact on the culture of an industry as @gitlab has.” - James Wise, Partner, Balderton on Twitter\n\nFrom day 1, we have co-created with the wider GitLab community, and together we have advanced the DevOps Platform. I am excited to keep building to make GitLab’s “everyone can contribute” mission a reality.\n\n## \u003Ca name=\"foundersletter\">\u003C/a> Founder’s Letter from the GitLab S-1\n\n## Letter From Our CEO\n\n**Origins**\n\nGitLab did not start in a tech incubator, garage, or Bay Area apartment. In 2011, my co-founder, Dmitriy Zaporozhets, created GitLab from his house in Ukraine. \n\nIn 2012, I discovered GitLab from my home in the Netherlands on a tech news site. I thought that it was natural that a collaboration tool for developers was open source so people could contribute to it. As a Ruby developer, I was impressed by GitLab’s code quality, especially since it absorbed more than 300 contributions in the first year. In 2013, Dmitriy tweeted that he would like to work on GitLab full-time. After reading that tweet, I approached him, and we partnered so he could work on GitLab full-time. We incorporated GitLab in 2014 and applied to Y Combinator, a technology accelerator in Silicon Valley. In 2015, we participated in their program, and this greatly accelerated our business.\n\n**DevOps Platform**\n\nTo ensure the quality of the GitLab application, Dmitriy built a second application, GitLab CI, to automatically test our code. In 2015, Kamil Trzciński, a member of the wider community, contributed a better version of the GitLab CI application so that it could run jobs in parallel. Dmitriy and I quickly made this new Runner the default version, and Kamil ended up joining the company.\n\nWhen Kamil proposed integrating the two applications, Dimitriy and I initially disagreed with him. Dmitriy felt that the applications were already integrated as well as two separate applications could be. 
And I believed that customers wanted to mix and match solutions. Thankfully, Kamil persisted in arguing for combining GitLab and GitLab CI into a single application. Dmitriy and I came around to Kamil’s point of view once we realized that combining the two applications would lead to greater efficiency for our team members and our users.\n\nThe results were far better than anyone expected. A single application was easier to understand, faster to use, and enabled collaboration across functions. We had invented what we believed to be the first true DevOps platform and proceeded to build it out. Kamil’s advocacy inspired GitLab’s “disagree, commit, and disagree'' sub-value. We allow GitLab team members to question decisions even after they are made. However, team members are required to achieve results on every decision while it stands, even while they are trying to have it changed.\n\n**Mission**\n\nGitLab's mission is to ensure that everyone can contribute. When everyone can contribute, users become contributors, and we greatly increase the rate of innovation. We are making progress toward our mission by elevating others through knowledge sharing, job access, and our software platform. We promote knowledge sharing through publishing how we operate in our handbook, an online repository of how we run the company that now totals more than 2,000 webpages. The lessons we have learned and put in the handbook are available to anyone with an internet connection. We contribute to job access by helping people with their tech careers and educating the world on remote work best practices. We believe that remote work is spreading job access more evenly across regions and countries. 
Our software platform brings together development, operations, and security professionals and makes it faster and more secure for them to innovate together.\n\n**Stewardship**\n\nMost of the time, when a company starts commercializing an open source software project, the wider community around the project shrinks. This has not been the case with GitLab. The wider community around GitLab is still growing. We are proud that GitLab is a co-creation of GitLab team members and users. We have ten stewardship promises that commit us to balancing the need to generate revenue with the needs of the open source project and the wider community. In our first year, we received just over 300 code contributions. Now, we frequently exceed this number in a single month.\n\n**Values**\n\nFrom the beginning of GitLab, we have been all-remote as the initial team members lived in the Netherlands, Ukraine, and Serbia. GitLab was founded before remote work was a proven model, so investors were worried about our ability to effectively manage the business and scale. That early skepticism required us to establish explicit mechanisms for value reinforcement. We now have over 20 mechanisms listed in our handbook. Some reinforcements are small. For example, team members have access to a Zoom background that showcases each of our values as icons. Others are more substantial. For example, every team member’s promotion document is structured around our values and shared with the entire company.\n\nGitLab’s values and underlying operational principles are core to our past, present, and future success. These values are:\n\n1. Results - This is the most important value in our values hierarchy as strong results enable us to keep doing the right things. If we have strong business momentum, we can continue to invest toward our ambitious, long-term mission. We care about what is achieved, not the hours worked. 
Since you get what you measure and reward, we do not encourage long hours and instead focus on results. For example, to discourage team members from focusing on hours worked, team members are discouraged from publicly thanking others for working long hours or late nights. This is intended to prevent pressure to work longer hours or highlighting longer hours as something that is rewarded.\n2. Collaboration - Team members must work effectively with others to achieve results. To encourage collaboration, we have about four group conversations per week. These are meetings in which departments at GitLab share their results with team members throughout the company. Group conversations enable all team members to understand and question every part of the business. This access to information and context supports collaboration.\n3. Efficiency - Working efficiently enables us to make fast progress, which makes work more fulfilling. For example, we only hold meetings when topics need to be discussed synchronously. When we do have a meeting, we share the discussion topics, the slide deck, and sometimes a recording of someone presenting the slide deck beforehand. This way we can dedicate the synchronous time of the meeting to discussion, not team members presenting material. We also have speedy meetings that are short, start on time, and end at least five minutes before the next one begins. We encourage team members to work together in public chat channels as much as possible instead of through direct messages. This makes information readily available to anyone who is interested or may become interested at a future point.\n4. Diversity, Inclusion, and Belonging (DIB) - We believe that team member diversity leads to better decisions and a greater sense of team member belonging. We spend more money than the industry average per hire to ensure we approach a diverse set of candidates. 
We have a DIB Program which includes Team Member Resource Groups (TMRGs), voluntary, team member-led groups, focused on fostering DIB within GitLab. I'm proud of team member driven initiatives such as mentoring for an advanced software engineering course at Morehouse College, a historically Black liberal arts school. We also do Reverse Ask Me Anything, meetings in which I ask questions of Team Member Resource Groups and get to learn from their experiences. We try to work asynchronously as much as possible to not be dependent on time zone overlap. This enables us to hire and work with people around the world from different cultures and backgrounds.\n5. Iteration - By reducing the scope of deliverables, we are able to complete them earlier and get faster feedback. Faster feedback gives us valuable information that guides what we do next. We measure and set targets for how many changes are expected from each engineering team. This encourages teams to reduce the scope of what they build and ship changes in smaller increments. We know that smaller changes are easier to review and less risky. The end result is that we are able to get more done as the higher frequency of changes more than compensates for the smaller size of them. We release features and categories even when they are minimally viable. We do not wait for perfection when we can offer something of value, get feedback, and allow others to contribute to features by refining and expanding upon them.\n6. Transparency - By making information public, we can reduce the threshold to contribute and make collaboration easier. In addition to our publicly shared handbook, we also livestream and share recordings of some of our meetings. I have CEO Shadows who attend all my GitLab meetings during a two week rotation. We are public about our strategy, risks, and product direction.\n\nThese are living values that are updated over time. 
In 2020 alone, we made 329 improvements to the GitLab Values page of our handbook.\n\n**Still a Startup**\n\nMost companies regress to the mean and slow down over time. We plan to maintain our startup ethos by doing the following:\n\n1. **Reinforcing our values**: We have more than 20 documented ways to reinforce GitLab’s values. Since hiring, bonuses, and promotions provide strong signals of what is valued and rewarded, we make values the lens through which we evaluate team member fit and advancement.\n2. **Quick and informed decisions**: We are able to combine the advantages of consensus organizations and hierarchical organizations by splitting decisions into two phases. In the data gathering phase, we employ the best of consensus organizations as we encourage people to contribute their ideas and opinions. In the decision phase, we benefit from the best of hierarchical organizations with one person, the directly responsible individual, deciding what to do without having to convince the people who made suggestions.\n3. **A directly responsible individual (DRI)**: A DRI is a single person who owns decision making authority and responsibility for the success of a given workstream or initiative. We avoid confusion and empower team members by being clear about the DRI. With a few documented exceptions, the person who does the work resulting from the decision gets to make the decision. DRIs tend to have the context required for good decision making and are empowered by their ability to use their own judgement in doing what is best for the business.\n4. **Organize informal communications**: Informal team member communications, such as a chat about life outside of work, are necessary for building trust. Trust is essential for great business results. 
Many businesses invest heavily in offices and facilities, because they believe offices are necessary for informal communication.\n\nDuring the pandemic, many businesses that were forced to work remotely discovered that productivity increased. Many of these same businesses are now making plans to return to the office. One reason being given for the return to the office is that not everyone can work from home. We solve this by allowing people to rent work space. The other main reason given is that people miss working from a central office with co-workers. I don’t think that people miss the commute or the office furniture. They miss informal communication. Central offices are a really expensive, inconvenient, and indirect way to facilitate informal communication. It is more efficient to directly organize informal communication.\n\nFor example, every person who joins GitLab has to schedule at least five coffee chats during their onboarding. We also have social calls, Ask Me Anything meetings with senior leaders, and 15 other explicit ways to encourage employee connections and relationship building. Intentionally organizing informal communication enables the trust-building conversations that are essential for collaboration. This can be more effective than relying on chance encounters in an office building. You can connect with team members throughout the world and across departments through a coffee chat. You may not meet people outside of your own floor in an office setting.\n\n5. **Challenge conventions**: We do not do things differently for the sake of being different, and we use boring solutions whenever possible. That said, we're also willing to deviate from conventions when it can benefit GitLab and the wider community. Before the COVID-19 pandemic, we believe GitLab was the largest all-remote company in the world. We now teach others how to succeed as remote companies and employees. We aim to be the most transparent company of our size. 
This transparency has had demonstrable benefits ranging from increased team member productivity to enhanced brand awareness. What some saw as a liability, we have shown to be a strength.\n6. **Bias for action**: Decisions should be thoughtful, but delivering fast results requires the fearless acceptance of occasionally making mistakes. Our bias for action may result in the occasional mistake, but it also allows us to course correct quickly. We keep the stakes low for mistakes for the sake of transparency. When people are comfortable communicating missteps, risk aversion and secrecy don’t become the norm.\n7. **Not a family**: Some companies talk about being a 'Family.' We don't think that is the right perspective. At GitLab, the relationship is not the end goal. The goal is results. We are clear about accountability and hold people to a clearly articulated standard. When people do not perform, we try to help them improve. If they still can’t meet expectations, we let them go.\n8. **Time based release**: We have introduced a new, enhanced version of our software on the 22nd of every month for over nine years. A time based release ensures that when a feature is ready, its release will not be held up by another that is not. Aligned with our value of iteration, we try to reduce the scope of each feature so that it fits in a single release.\n9. **Individual innovation**: We empower individuals to innovate. For example, we have designated coaches who support contributors from the wider community in getting their contributions to the point where they can be merged by GitLab. We also have an incubation department dedicated to quickly turning ideas into viable features and products.\n10. **Dogfooding**: The best way to quickly improve GitLab is to use it ourselves, or dogfood it, so that we have a quick feedback loop. We use our own product even when a feature is in its early stages of development. 
This helps us to develop empathy with users and better understand what to build next.\n\n## Long-Term Focus\n\nMore than 40 million software professionals are driving change through software, and this number is growing. These software professionals are rapidly adopting DevOps to accelerate this change. Gartner predicts that by 2023, 40% of organizations will have switched from multiple point solutions to DevOps value stream delivery platforms to streamline application delivery, versus less than 10% in 2020. I believe that 40% is just the beginning, and almost all organizations will eventually use a DevOps Platform. GitLab has a unique opportunity to lead the DevOps Platform market and shape innovation.\nWith a large addressable market, GitLab plans to optimize for long term growth--even if it comes at the expense of short-term profitability. This means that we may not make a profit for a long time as we need to weigh profitability against the clear opportunity to pursue larger, future returns.\n\n## Closing\n\nWith the wider GitLab community, we have created and advanced the DevOps Platform. I am excited to keep building to make GitLab’s “everyone can contribute” mission a reality. 
I look forward to welcoming investors who share our enthusiasm for collaboration and innovation.\n\n* 2,600 contributors as of July 31, 2021\n",[726,9],{"slug":3801,"featured":6,"template":686},"gitlab-inc-takes-the-devops-platform-public","content:en-us:blog:gitlab-inc-takes-the-devops-platform-public.yml","Gitlab Inc Takes The Devops Platform Public","en-us/blog/gitlab-inc-takes-the-devops-platform-public.yml","en-us/blog/gitlab-inc-takes-the-devops-platform-public",{"_path":3807,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3808,"content":3813,"config":3818,"_id":3820,"_type":14,"title":3821,"_source":16,"_file":3822,"_stem":3823,"_extension":19},"/en-us/blog/gitlab-incident-management",{"title":3809,"description":3810,"ogTitle":3809,"ogDescription":3810,"noIndex":6,"ogImage":928,"ogUrl":3811,"ogSiteName":670,"ogType":671,"canonicalUrls":3811,"schema":3812},"Downtime happens, but GitLab Incident Management can help","GitLab's DevOps Platform doesn't just make it easy to release safe software faster, it also streamlines the process for problem solving. Here's a deep dive into GitLab Incident Management.","https://about.gitlab.com/blog/gitlab-incident-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Downtime happens, but GitLab Incident Management can help\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2021-11-30\",\n      }",{"title":3809,"description":3810,"authors":3814,"heroImage":928,"date":3815,"body":3816,"category":769,"tags":3817},[2120],"2021-11-30","\n\nDowntime is expensive and the cost is growing. Software reliability is as important as the product itself – it doesn't matter what your product can do if your customers can't reliably access it. 
GitLab's Incident Management is built-in to our [DevOps Platform](/solutions/devops-platform/) and empowers teams with adaptable practices and a streamlined workflow for triage and resolving incidents. We offer tools that provide access to observability resources, such as metrics, logs, errors, runbooks, and traces, that foster easy collaboration across response teams, and that support continuous improvement via post-incident reviews and system recommendations. Here's a look at how it all works.\n\n## The costs of being down\n\nDowntime can cost companies hundreds of thousands of dollars in a single hour. Avoiding downtime is critical for organizations. Companies need to invest time, establish processes and culture around managing outages, and have processes to resolve them quickly. The larger an organization becomes, the more distributed their systems. This distribution leads to longer response times and more money lost. Investing in the right tools and fostering a culture of autonomy, feedback, quality, and automation leads to more time spent innovating and building software. If done well, teams will spend less time reacting to outages and racing to restore services. The tools your [DevOps](/topics/devops/) teams use to respond during incidents also have a huge effect on MTTR (Mean Time To Resolve, also known as Mean Time To Repair).  \n\n## What is an incident? \n\nIncidents are anomalous conditions that result in — or may lead to — service degradation or outages. Those outages can impact employee productivity, and decrease customer satisfaction and trust. These events require human intervention to avert disruptions or restore service to operational status. Incidents are always given attention and resolved.\n\n## What is Incident Management? \n\nIncident Management is a process which is focused on restoring services as quickly as possible and proactively addressing early vulnerabilities and warnings, all while keeping employees productive and customers happy. 
\n\n## Meet GitLab Incident Management \n\n[GitLab Incident Management](https://docs.gitlab.com/ee/operations/incident_management/) aims to decrease the overhead of managing incidents so response teams can spend more time actually resolving problems. We accelerate problem resolution through efficient knowledge sharing in the same tool they already use to collaborate on development. Enabling teams to quickly gather resources in one central, aggregated view gives the team a single source of truth and shortens the MTTR. \n\nGitLab’s built-in Incident management solution provides tools for the triage, response, and remediation of incidents. It enables developers to easily triage and view the alerts and incidents generated by their application. By surfacing alerts and incidents _where the code is being developed_, problems can be resolved more efficiently. \n\n## Why Incident Management within GitLab?\n\nGitLab is a [DevOps Platform](/solutions/devops-platform/), delivered as a single application. As such, we believe there are additional benefits for DevOps users to manage incidents within GitLab.\n\n1. Co-location of code, CI/CD, monitoring tools, and incidents reduces context switching and enables GitLab to correlate what would be disparate events or processes within one single control pane.\n\n2. The same interface for development collaboration and incident response streamlines the process. The developers who are on-call can use the same interface they already use every day; this prevents the incident responders from having to use a tool they are unfamiliar with and thus hampering their ability to respond to the incident.\n\n## How to manage incidents in the GitLab DevOps Platform\n\n### Create an incident manually or automatically \n\nYou can create incidents manually or enable GitLab to create incidents automatically whenever an alert is triggered. 
If you use PagerDuty for incidents, you can [set up a webhook with PagerDuty](https://docs.gitlab.com/ee/operations/incident_management/incidents.html#create-incidents-via-the-pagerduty-webhook) to automatically create a GitLab incident for each PagerDuty incident. \n\n![pd](https://about.gitlab.com/images/blogimages/incident-mgmt/pager.png)\n\n### Alert Management \n\n[Alerts](https://docs.gitlab.com/ee/operations/incident_management/alerts.html) are a critical entity in incident management workflow. They represent a notable event that might indicate a service outage or disruption. GitLab can accept alerts from any source via a webhook receiver. GitLab provides a list view for triage and detail view for deeper investigation of what happened.\n\n![alert](https://about.gitlab.com/images/blogimages/incident-mgmt/alert.png)\n\n### On-Call Schedules\n\nTo maintain the availability of your software services you need to schedule on-call teams. [On-call schedule management](https://docs.gitlab.com/ee/operations/incident_management/oncall_schedules.html) is being used to create schedules for responders to rotate on-call responsibilities. Within each schedule you can add team members to rotations that last hours, weeks or days depending on your team's needs. Some teams need to be on-call just during business hours, while others have someone on-call 24/7, 365; every team is different.  \n\n![on-call](https://about.gitlab.com/images/blogimages/incident-mgmt/on-call.png)\n\n### Escalation Policies\n\n[Escalation Policies](https://docs.gitlab.com/ee/operations/incident_management/escalation_policies.html) determine when users on-call get notified and what happens if they don’t respond. They are the if/then logic that use on-call schedules to make sure teams never miss an incident. 
You can create an escalation policy in the GitLab project where you manage on-call schedules.\n\n![escalation](https://about.gitlab.com/images/blogimages/incident-mgmt/escalation.png) \n\n### Paging and Notifications \n\nWhen there is a new alert or incident, it is important for a responder to be notified immediately so they can triage and respond to the problem. GitLab Incident Management supports email notifications, with plans to add Slack notifications, SMS, and phone calls. \n\n\n\n\n\n\n",[9,749,1040],{"slug":3819,"featured":6,"template":686},"gitlab-incident-management","content:en-us:blog:gitlab-incident-management.yml","Gitlab Incident Management","en-us/blog/gitlab-incident-management.yml","en-us/blog/gitlab-incident-management",{"_path":3825,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3826,"content":3832,"config":3837,"_id":3839,"_type":14,"title":3840,"_source":16,"_file":3841,"_stem":3842,"_extension":19},"/en-us/blog/gitlab-is-now-an-approved-slp-vendor-in-california",{"title":3827,"description":3828,"ogTitle":3827,"ogDescription":3828,"noIndex":6,"ogImage":3829,"ogUrl":3830,"ogSiteName":670,"ogType":671,"canonicalUrls":3830,"schema":3831},"GitLab is now an approved SLP vendor in California","State and local agencies in California can now purchase GitLab licenses at an agreed-upon discount.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668402/Blog/Hero%20Images/code-gitlab-tanuki.png","https://about.gitlab.com/blog/gitlab-is-now-an-approved-slp-vendor-in-california","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is now an approved SLP vendor in California\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-04-19\",\n      }",{"title":3827,"description":3828,"authors":3833,"heroImage":3829,"date":3834,"body":3835,"category":769,"tags":3836},[745],"2022-04-19","GitLab is now an 
approved vendor under the Software Licensing Program (SLP) with the state of California. This contract allows state and local agencies, including educational institutions in California, to purchase GitLab software licenses at an agreed-upon discount, reducing costs and streamlining the procurement process. Under the contract, agencies will have greater access to GitLab’s complete DevOps solution, which empowers organizations to deliver software faster and more efficiently.\n\nEstablished in 1994, [California’s SLP](https://www.dgs.ca.gov/PD/About/Page-Content/PD-Branch-Intro-Accordion-List/Acquisitions/Software-Licensing-Program) is managed by the Procurement Division of the Department of General Services. The program provides government agencies and institutions with discounted rates for software licenses and upgrades, reducing the need for individual departments to conduct repetitive acquisitions. \n\n“There’s an exciting opportunity for public sector agencies to benefit from automated DevOps practices,” says [Bob Stevens](/company/team/#bstevens1), GitLab’s area vice president for Public Sector Federal. “This contract makes it simpler and more cost-effective for agencies to adopt The DevOps Platform, and deliver more resilient and efficient applications while keeping security at the forefront.”  \n\nGitLab believes that this contract, which makes The DevOps Platform more accessible and cost-effective, will expedite the broader adoption of DevOps in the [public sector](/solutions/public-sector/). GitLab’s single application will enable greater collaboration within public sector agencies, allowing teams to partner on planning, building, securing, and deploying software. \n\nTo streamline the process, GitLab will work with channel partners including [Acuity Technical Solutions](https://www.acuitytechnical.com), [Launch Consulting](https://www.launchconsulting.com) and [Veteran Enhanced Technology Solutions](https://veteranets.com/). 
\n\n“Public sector agencies are under tremendous pressure to transform and streamline their software development processes,” said [Michelle Hodges](/company/team/#mwhodges), GitLab’s vice president of global channels. “We’re proud to extend the power of our platform to a new network of customers via trusted channel partners and to help evolve the ways in which they collaborate on and deliver software.”",[9,282,875],{"slug":3838,"featured":6,"template":686},"gitlab-is-now-an-approved-slp-vendor-in-california","content:en-us:blog:gitlab-is-now-an-approved-slp-vendor-in-california.yml","Gitlab Is Now An Approved Slp Vendor In California","en-us/blog/gitlab-is-now-an-approved-slp-vendor-in-california.yml","en-us/blog/gitlab-is-now-an-approved-slp-vendor-in-california",{"_path":3844,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3845,"content":3850,"config":3855,"_id":3857,"_type":14,"title":3858,"_source":16,"_file":3859,"_stem":3860,"_extension":19},"/en-us/blog/gitlab-is-setting-standard-for-devsecops",{"title":3846,"description":3847,"ogTitle":3846,"ogDescription":3847,"noIndex":6,"ogImage":928,"ogUrl":3848,"ogSiteName":670,"ogType":671,"canonicalUrls":3848,"schema":3849},"GitLab is setting the standard for DevSecOps","GitLab has been recognized as a challenger in the 2021 Gartner Magic Quadrant for Application Security Testing","https://about.gitlab.com/blog/gitlab-is-setting-standard-for-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is setting the standard for DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2021-06-01\",\n      }",{"title":3846,"description":3847,"authors":3851,"heroImage":928,"date":3852,"body":3853,"category":726,"tags":3854},[1921],"2021-06-01","\nDebate always swirls over what term is best: [DevSecOps](/topics/devsecops/), SecDevOps, or even to drop “sec” altogether 
since it should be ubiquitous. At GitLab, we have a strong point of view on this: DevSecOps places security in the middle of the [DevOps](/topics/devops/) effort, which is where it rightly belongs. While transparent and a natural part of the process, it should be top of mind and not buried. Security needs to be everyone’s responsibility. With automation of processes and policies, it can be much easier to arm developers and security pros alike with the information they need in order to meet this responsibility.\n\nOur DevSecOps platform is the end-to-end secure software solution that helps you plan, create, deploy, secure, and manage your modern software and the infrastructure upon which it relies. GitLab provides the visibility and controls necessary to protect the integrity of your software factory and its deliverables.\n\nGitLab has been a catalyst for change when it comes to the evolution of DevSecOps versus traditional application security testing. Let’s look at what this evolution entails:\n\nSecurity testing\n\n*   Old: Security testing is performed by security pros, using their own tools, usually at the end of a development cycle.\n*   New: Security testing is automated within the CI pipeline with findings delivered to the developer while they are still iterating on their code. Findings are limited to new vulnerabilities introduced in this code change making it highly clear and actionable for the developer to correct the security flaws that they created without taking responsibility for the backlog of flaws and technical debt that already existed.\n\nCI and security \n\n*   Old: CI scripts might be used to call security scanners and pull the findings into the CI pipeline. Yet the two tools remain separated. Often elements are missing and integration must be maintained. 
Licenses of the CI tool and the scanners are separate and can be difficult to manage, especially when they charge by different variables (users, apps, code size).\n*   New: United into a single tool, there is no costly integration to maintain and only a single license to manage. \n\nRemediation\n\n*   Old: Security pros must constantly track remediation status of critical vulnerabilities (risk). The findings are in one tool, but the remediation effort is within the development team putting the two teams in a constant state of friction and inefficient communications. \n*   New: By sharing a single tool, security pros can see the status of remediation for given vulnerabilities right in their dashboard. And, when GitLab issues are used, both teams can collaborate to work together on remediation.\n\nWe are proud of our influence on this evolution and of our progress. We invite you to learn more about the capabilities and benefits of [GitLab security and compliance](https://about.gitlab.com/solutions/security-compliance/).\n\nGitLab has been [recognized as a challenger](/analysts/gartner-ast21/) in the 2021 Gartner Magic Quadrant for Application Security Testing for its ability to execute and completeness of vision. We believe this is a nod to the value of getting scan results into the hands of those who can do the remediation. Gartner opens their Magic Quadrant report by saying, “Modern application design and the continued adoption of DevSecOps are expanding the scope of the AST market. Security and risk management leaders can meet tighter deadlines and test more complex applications by seamlessly integrating and automating AST in the software delivery life cycle.”  We agree.\n\nGitLab is the first to provide many scan types to the developer including SAST, DAST, dependency scanning, container scanning, secrets detection, license  compliance, API fuzzing, and coverage-guided fuzzing. We offer dependency lists and vulnerability management as well. 
We are regularly replacing many incumbent app sec vendors. We feel that Microsoft probably remains our biggest competitor thanks to their broad licensing agreements even though [our security capabilities are more complete](https://about.gitlab.com/competition/github/).\n\nLast year, [GitLab acquired Peach Tech and Fuzz It](https://about.gitlab.com/press/releases/2020-06-11-gitlab-acquires-peach-tech-and-fuzzit-to-expand-devsecops-offering.html), each offering a different approach to fuzzing. This week, we’re proud to announce another acquisition of security capabilities, this time of Machine Learning technology. We are bringing innovation to a rather tired app sec industry by not only adding value to the development team, but by bringing new scanning methods to meet the demands of modern application architectures. You’ll hear about these and also innovative capabilities from our partners at [GitLab Commit](https://about.gitlab.com/events/commit/), our online user conference, in August.\n\nSince responding to Gartner’s questionnaire (that used GitLab version 13.6), we have had 5 monthly releases with numerous security features. 
Some of the most significant capabilities added after our Magic Quadrant submission include: \n\n*   [Compliant pipeline configurations](https://about.gitlab.com/releases/2021/04/22/gitlab-13-11-released/#compliance-pipeline-configurations) using [Compliance Frameworks](https://docs.gitlab.com/ee/user/project/settings/index.html#compliance-frameworks), \n*   [Security Alert Dashboard](https://about.gitlab.com/releases/2021/02/22/gitlab-13-9-released/#security-alert-dashboard-for-container-network-policy-alerts) for [container network policies](https://docs.gitlab.com/ee/user/application_security/container_scanning/) along with [on-call schedule management](https://about.gitlab.com/releases/2021/04/22/gitlab-13-11-released/#on-call-schedule-management), \n*   [Bulk vulnerability status updates](https://about.gitlab.com/releases/2021/03/22/gitlab-13-10-released/#vulnerability-bulk-status-updates) and other vulnerability management enhancements, \n*   [Admin Mode](https://about.gitlab.com/releases/2021/04/22/gitlab-13-11-released/#re-authenticate-for-gitlab-administration-with-admin-mode) to reverify admin credentials,  \n*   [Semgrep ](https://about.gitlab.com/releases/2021/04/22/gitlab-13-11-released/#gitlab--semgrep-upgrading-sast-for-the-future)for custom detection rules, \n*   [custom certificates](https://about.gitlab.com/releases/2021/04/22/gitlab-13-11-released/#support-for-custom-ca-certs-when-using-the-release-cli), \n*   [email alerts for key expirations](https://about.gitlab.com/releases/2021/04/22/gitlab-13-11-released/#ssh-key-expiration-email-notification), \n*   [enforcing SAML for Git activity](https://about.gitlab.com/releases/2021/04/22/gitlab-13-11-released/#saml-enforcement-for-git-activity).  
\n*   [On-demand DAST](https://docs.gitlab.com/ee/user/application_security/dast/#on-demand-scans), and\n*   A [new browser-based crawler for DAST](https://about.gitlab.com/releases/2021/05/22/gitlab-13-12-released/#new-browser-based-crawler-for-dast-in-open-beta) for coverage greater than the current proxy-based crawler.\n\nIn the last year, we have also added Professional Services capabilities for [security training](https://www.google.com/url?q=https://about.gitlab.com/services/education/security-essentials/&sa=D&source=editors&ust=1622175048426000&usg=AOvVaw3kq5901QTQ8sahHBjQhuNV), [migration](https://www.google.com/url?q=https://about.gitlab.com/services/migration/enterprise/&sa=D&source=editors&ust=1622175048427000&usg=AOvVaw3rRtz8pZmVx__aOMqEdIgH), and [advisory services](https://www.google.com/url?q=https://about.gitlab.com/services/advisory/&sa=D&source=editors&ust=1622175048429000&usg=AOvVaw2wgpPAo02Sx3o8Cg8ng9nd). \n\nApplication security is about to get even more visibility. After the SolarWinds attack and the following gas pipeline attack, the importance of securing the software factory to deliver code safely has become top of mind for anyone dealing with the U.S. Federal Agencies. President Biden’s [Executive Order on Improving the Nation’s Cybersecurity](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/) will have far-reaching consequences outside of the government and outside of the United States. GitLab is uniquely positioned to help meet these challenges. This blog on [Securing your Software Supply Chain](https://lnkd.in/gT5QrrU) provides insight into how you can apply common controls that reach beyond simple application security scanning. 
In [this webinar,](https://www.youtube.com/watch?v=7xd1bBOn9JI) we provide 6 steps to get you started, along with a demo of the relevant features in action.\n\nTrue DevSecOps represents a new era of software security with a much broader scope than traditional App Sec. We believe GitLab has led the market toward this evolution and will continue to do so. \n\nAttribution: Gartner Magic Quadrant for Application Security Testing, Dale Gardner, Mark Horvath, Dionisio Zumerle, 27th May, 2021. \n{: .note .font-small .margin-top40}\n\nGartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s research organization and should not be construed as statements of fact. Gartner disclaims all warranties, express or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose.\n{: .note .font-small}\n",[9,875],{"slug":3856,"featured":6,"template":686},"gitlab-is-setting-standard-for-devsecops","content:en-us:blog:gitlab-is-setting-standard-for-devsecops.yml","Gitlab Is Setting Standard For Devsecops","en-us/blog/gitlab-is-setting-standard-for-devsecops.yml","en-us/blog/gitlab-is-setting-standard-for-devsecops",{"_path":3862,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3863,"content":3869,"config":3874,"_id":3876,"_type":14,"title":3877,"_source":16,"_file":3878,"_stem":3879,"_extension":19},"/en-us/blog/gitlab-is-the-single-source-of-truth-for-ecommerce-provider",{"title":3864,"description":3865,"ogTitle":3864,"ogDescription":3865,"noIndex":6,"ogImage":3866,"ogUrl":3867,"ogSiteName":670,"ogType":671,"canonicalUrls":3867,"schema":3868},"GitLab is the single source of truth for eCommerce provider","Swell uses GitLab company-wide and says the biggest advantage so far is the review operations 
capability.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668755/Blog/Hero%20Images/swelllogo3.png","https://about.gitlab.com/blog/gitlab-is-the-single-source-of-truth-for-ecommerce-provider","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is the single source of truth for eCommerce provider\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-06-23\",\n      }",{"title":3864,"description":3865,"authors":3870,"heroImage":3866,"date":3871,"body":3872,"category":769,"tags":3873},[745],"2022-06-23","eCommerce platform provider [Swell](https://www.swell.is) was built to give entrepreneurs the opportunity to build the online business that they envision. A GitLab customer since 2021, GitLab has been adopted as Swell's one DevOps, project management, and support ticketing tool for the whole organization. It's the foundational platform that the business works on.\n\nSwell is using GitLab Premium in many different areas, including for product development and to build the platform infrastructure, says Nico Bistolfi, vice president of technology.\n\n\"GitLab is our source of truth for everything,\" Bistolfi says. Now, Swell is looking into expanding its usage of the platform to leverage features such as code quality, automation, and other types of dynamic application security and static application security.\n\n## GitLab for CI/CD\nSwell upgraded to the Premium version and the biggest advantage so far has been the review operations capability, Bistolfi says. The company has created environments for every merge request users make, and that replicates in production for testers to see what was changed, whether a fix was made, or how the new feature is working.\n\n\"We could not go to our software development lifecycle today without the review ops. 
That's something that is critical for us,\" Bistolfi says.\n\nGitLab is used for both continuous integration (CI) and continuous deployment (CD). While building the [CI/CD](/topics/ci-cd/) pipeline process is ongoing, Bistolfi says, “We are slowly changing it and relying more and more on GitLab” in areas, including application security.\n\nBefore moving to GitLab, Swell was using bare-metal servers. The company now uses GitLab’s container management solutions and all API updates are happening through the platform.\n\n## From inputting issues to resolution\nEveryone at Swell is using GitLab — not just developers — and for a variety of tasks. The company has created a way to process support tickets through the platform. Another use case is knowledge management.\n\n\"We find ourselves making some decisions from comments in GitLab,\" he says. The whole process from the time a ticket is created to being resolved is done within the platform.\n\nThe company culture is about full information transparency, Bistolfi says, particularly since Swell is fully remote and employees work from 11 different countries. So one goal is to maintain asynchronous communication.\n\nWhen an issue is created in the platform, a little bit of coding is required, but he said non-developer users have adapted well. The feedback so far has been that using GitLab has been frictionless.\n\n## Speed to delivery\nInitially, for some services, it took about 30 minutes to build and deploy an image. Now, the process has been decreased to between one and five minutes in most cases.\n\nSwell manually sets release dates for system improvements and, right now, there are about two a week. 
The company is working on automating the process for continuous delivery with the goal of soon having releases every couple of hours.\n\n## Team play\nSwell manages team backlogs, sprints, milestones, and future work using its own flavor of Kanban with what Bistolfi calls \"quick labels.\"\n\nEngineering teams are being scaled and, in addition to Kanban, some projects are done using Scrum. Changing their GitLab configuration has let teams measure velocity better.  \n\nA future goal is to gain visibility into team results, as well as use GitLab for project planning and management, he says.\n\n## GitLab as a product and company\nBistolfi is unequivocal in his enthusiasm for GitLab. \"We know that GitLab is there for us to continue growing,\" he says. \"We know we can rely on that. And something that I always tell a team when we are evaluating what we're going to do or how we're going to solve certain problems is that there are areas GitLab is just starting to innovate on or is just starting to launch new features.\"\n\nIf those areas are at 80% of what Swell needs, the company will continue to use GitLab. \"We need to have very, very strong reasons to look for another tool to integrate with GitLab.\" He added that \"we trust that GitLab is going in the right direction for us. In addition, we've gained efficiency in our ability to provide consistent test environments using GitLab Review Apps to reduce regressions and improve new feature development.\"\n\nThe Swell team also likes that GitLab provides thorough and complete information in its handbook, which has been very beneficial in helping the company manage things internally. 
\"That has been inspiring for many of us on the executive team,\" he notes.\n\nFor example, during the pandemic, Bistolfi put together a document called \"The Ultimate Guide for Swell Engineers,\" which contains three pages of information about culture, what to expect from teammates, and how to communicate and prioritize tasks.\n\nA lot of guidance came from the GitLab handbook, he adds.\n\nMoving forward with GitLab, Bistolfi says: \"We are incorporating most of the Security and Compliance tools in order to keep track and audit for our compliance. We plan to expand the usage to other projects, but we are already using container and dependency scanning, SAST, secrets detection, and license scanning for some of our core and more sensitive services.\"\n\nWhat Swell likes most about GitLab is the thoroughness of the tool. \"From an engineering perspective, 10 years ago, you would never have imagined all the features and capabilities that GitLab offers being incorporated into one platform,\" Bistolfi says.",[9,976,977,875,793],{"slug":3875,"featured":6,"template":686},"gitlab-is-the-single-source-of-truth-for-ecommerce-provider","content:en-us:blog:gitlab-is-the-single-source-of-truth-for-ecommerce-provider.yml","Gitlab Is The Single Source Of Truth For Ecommerce Provider","en-us/blog/gitlab-is-the-single-source-of-truth-for-ecommerce-provider.yml","en-us/blog/gitlab-is-the-single-source-of-truth-for-ecommerce-provider",{"_path":3881,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3882,"content":3888,"config":3893,"_id":3895,"_type":14,"title":3896,"_source":16,"_file":3897,"_stem":3898,"_extension":19},"/en-us/blog/gitlab-jira-integration-selfmanaged",{"title":3883,"description":3884,"ogTitle":3883,"ogDescription":3884,"noIndex":6,"ogImage":3885,"ogUrl":3886,"ogSiteName":670,"ogType":671,"canonicalUrls":3886,"schema":3887},"How to achieve a GitLab Jira integration","Check out how to integrate GitLab self-managed with Atlassian Jira to connect your merge requests, 
branches, and commits to a Jira issue.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667260/Blog/Hero%20Images/twopeasinapod.jpg","https://about.gitlab.com/blog/gitlab-jira-integration-selfmanaged","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to achieve a GitLab Jira integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tye Davis\"}],\n        \"datePublished\": \"2021-04-12\",\n      }",{"title":3883,"description":3884,"authors":3889,"heroImage":3885,"date":3890,"body":3891,"category":791,"tags":3892},[3388],"2021-04-12","\n_This is the second in a series of posts on GitLab Jira integration strategies. The [first post](/blog/integrating-gitlab-com-with-atlassian-jira-cloud/) explains how to integrate GitLab.com with Jira Cloud._\n\nThe advantages of a GitLab Jira integration are clear:\n\n* One GitLab project integrates with all the Jira projects in a single Jira instance. \n* Quickly navigate to Jira issues from GitLab. \n* Detect and link to Jira issues from GitLab commits and merge requests. \n* Log GitLab events in the associated Jira issue. \n* Automatically close (transition) Jira issues with GitLab commits and merge requests.\n\nHere's a step-by-step guide of everything you need to know to achieve a GitLab Jira integration.\n\n## Pre-configuration\n\nAs you approach configuring your GitLab project to Jira, you can choose from two options that best fit your company or organization's needs.  You can either:\n\n* Use a service template by having a GitLab administrator provide default values for configuring integrations at the project level. When enabled, the defaults are applied to all projects that do not already have the integration enabled or do not otherwise have custom values enabled. The Jira integration values are all pre-filled on each project's configuration page for jira integration. 
If you disable the template, these values no longer appear as defaults, while any values already saved for an integration remain unchanged.\n\n* Configure integrations at a specific project level that will contain custom values specific to that project and that project alone.\n\nIt should be noted that each GitLab project can be configured to connect to an entire Jira instance. That means one GitLab project can interact with all Jira projects in that instance, once configured. Therefore, you will not have to explicitly associate a GitLab project with any single Jira project.\n\nGitLab offers several different options that allow you to integrate Jira in a way that best fits you and your team's needs based on how you’ve set up your Jira software. Let’s take a deeper look into how to set-up each of these available options.\n\n## How to configure Jira\n\nThe first step in setting up your GitLab Jira integration is having your Jira configuration in order. \n\n**Jira Server** supports basic authentication. When connecting, a username and password are required. Note that connecting to Jira Server via CAS is not possible. Set up a user in Jira Server first and then proceed to Configuring GitLab.\n\n**Jira Cloud** supports authentication through an API token, and in order to begin the process you need to start by creating one within Jira. When connecting to Jira Cloud, an email and API token are required. Set up a user in Jira Cloud first and then proceed to Configuring GitLab. \n\nCreate an API token here: https://id.atlassian.com/manage-profile/security/api-tokens  \n\n* Log in to id.atlassian.com with your email address. 
It is important that the user associated with this email address has write access to projects in Jira\n\n* Click Create API token.\n\n![Create API Token in Jira](https://about.gitlab.com/images/blogimages/atlassianjira/createjiratoken.png){: .shadow.medium.center}\nJira API token creation\n{: .note.text-center}\n\n* Click Copy, or click View and write down the new API token. It is required when configuring GitLab.\n\n![Copy API Token](https://about.gitlab.com/images/blogimages/atlassianjira/copyjiratoken.png){: .shadow.medium.center}\nJira API token copy to clipboard\n{: .note.text-center}\n\n## How to configure GitLab\n\nAs mentioned above, you can begin setting up the Jira integration either by using a service template that defaults all GitLab projects to pre-fill Jira values or you can set up at an individual project level. \n\nTo set up a service template:\n\n* 1a. Navigate to the Admin Area > Service Templates and choose the Jira service template.\n\n![GitLab Service Templates](https://about.gitlab.com/images/blogimages/atlassianjira/GitLabServiceTemplates.png){: .shadow.medium.center}\nGitLab Service Templates\n{: .note.text-center}\n\n2a. For each project, you will still need to configure the issue tracking URLs by replacing :issues_tracker_id in the above screenshot with the ID used by your external issue tracker.\n\n![Issue Tracker ID](https://about.gitlab.com/images/blogimages/atlassianjira/issuetrackerid.png){: .shadow.medium.center}\nIssue Tracker ID\n{: .note.text-center}\n\nTo set up an individual project template:\n\n* 1b. To enable the Jira integration in a project, navigate to the Integrations page and click the Jira service.\n\n![Enable Jira Integration](https://about.gitlab.com/images/blogimages/atlassianjira/enablejiraintegration.png){: .shadow.medium.center}\nEnable Jira Integration\n{: .note.text-center}\n\n* 2b. Select a Trigger action. 
This determines whether a mention of a Jira issue in GitLab commits, merge requests, or both, should link the Jira issue back to that source commit/MR and transition the Jira issue, if indicated.\n\n![Select Trigger Action](https://about.gitlab.com/images/blogimages/atlassianjira/selecttriggeraction.png){: .shadow.medium.center}\nSelect Trigger Action\n{: .note.text-center}\n\n* 3b. To include a comment on the Jira issue when the above reference is made in GitLab, check Enable comments.\n\n* 3c.  Enter the further details on the page as described in the following table:\n\n| Field | Description |\n|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| Web URL | The base URL to the Jira instance web interface which is being linked to this GitLab project. E.g.,  https://jira.example.com. |\n| Jira API URL | The base URL to the Jira instance API. Web URL value will be used if not set. E.g.,  https://jira-api.example.com. Leave this field blank (or use the same value of Web URL) if using Jira Cloud.|\n| Username or Email | Use username for Jira Server or email for Jira Cloud |\n| Transition ID | Required for closing Jira issues via commits or merge requests. This is the ID of a transition in Jira that moves issues to a desired state. If you insert multiple transition IDs separated by , or;, the issue is moved to each state, one after another, using the given order. 
(See below for obtaining a transition ID) |\n\nIn order to obtain a transition ID, do the following:\n* By using the API, with a request like https://yourcompany.atlassian.net/rest/api/2/issue/ISSUE-123/transitions using an issue that is in the appropriate “open” state\n\n*Note: The transition ID may vary between workflows (e.g., bug vs. story), even if the status you are changing to is the same.*\n\n![Transition ID](https://about.gitlab.com/images/blogimages/atlassianjira/transitionid.png){: .shadow.medium.center}\nTransition ID\n{: .note.text-center}\n\nYour GitLab project can now interact with all Jira projects in your instance and the project now displays a Jira link that opens the Jira project.\n\nWhen you have configured all settings, click **Test settings and save changes.** \n\n![Test settings and save changes](https://about.gitlab.com/images/blogimages/atlassianjira/testsettingsandsavechanges.png){: .shadow.medium.center}\nTest settings and save changes\n{: .note.text-center}\n\nIt should be noted that you can only display issues from a single Jira project within a given GitLab project.\n\nThe integration is now **activated:**\n\n![Active Jira Integration](https://about.gitlab.com/images/blogimages/atlassianjira/activeintegration.png){: .shadow.medium.center}\nActive Jira Integration\n{: .note.text-center}\n\n## Jira Issues\n\nBy now you should have [configured Jira](https://docs.gitlab.com/ee/integration/jira/index.html#configuring-jira) and enabled the [Jira service in GitLab](https://docs.gitlab.com/ee/integration/jira/index.html#configuring-gitlab). 
If everything is set up correctly you should be able to reference and close Jira issues by just mentioning their ID in GitLab commits and merge requests.\n\nJira issue IDs must be formatted in uppercase for the integration to work.\n\n### 1. How to reference Jira issues\n\nWhen a GitLab project has the Jira issue tracker configured and enabled, mentioning a Jira issue in GitLab will automatically add a comment in the Jira issue with the link back to GitLab. This means that in comments in merge requests and commits referencing an issue, e.g., PROJECT-7, will add a comment in Jira issue in the format:\n\nUSER mentioned this issue in RESOURCE_NAME of [PROJECT_NAME|LINK_TO_COMMENT]:\nENTITY_TITLE\n\n* USER A user that mentioned the issue. This is the link to the user profile in GitLab.\n* LINK_TO_THE_COMMENT Link to the origin of mention with a name of the entity where Jira issue was mentioned.\n* RESOURCE_NAME Kind of resource which referenced the issue. Can be a commit or merge request.\n* PROJECT_NAME GitLab project name.\n* ENTITY_TITLE Merge request title or commit message first line.\n\n![Reference Jira issues](https://about.gitlab.com/images/blogimages/atlassianjira/issuelinks.png){: .shadow.medium.center}\nReference Jira issues\n{: .note.text-center}\n\nFor example, the following commit will reference the Jira issue with PROJECT-1 as its ID:\n\ngit commit -m \"PROJECT-1 Fix spelling and grammar\"\n\nClosing Jira Issues\n\nJira issues can be closed directly from GitLab when you push code by using trigger words in commits and merge requests. 
When a commit which contains the trigger word followed by the Jira issue ID in the commit message is pushed, GitLab will add a comment in the mentioned Jira issue and immediately close it (provided the transition ID was set up correctly).\n\nThere are currently three trigger words, and you can use any one of them to achieve the same goal:\n* Resolves PROJECT-1\n* Closes PROJECT-1\n* Fixes PROJECT-1\n\nwhere PROJECT-1 is the ID of the Jira issue.\n\nNotes:\n\n* Only commits and merges into the project’s default branch (usually main or master) will close an issue in Jira. You can change your project's default branch under project settings.\n\n* The Jira issue will not be transitioned if it has a resolution.\n\nLet’s consider the following example:\n\n* For the project named PROJECT in Jira, we implemented a new feature and created a merge request in GitLab.\n* This feature was requested in Jira issue PROJECT-7 and the merge request in GitLab contains the improvement\n* In the merge request description we use the issue closing trigger Closes PROJECT-7.\n* Once the merge request is merged, the Jira issue will be automatically closed with a comment and an associated link to the commit that resolved the issue.\n\nIn the following screenshot you can see what the link references to the Jira issue look like.\n\n![GitLab link references](https://about.gitlab.com/images/blogimages/atlassianjira/linkreferences.png){: .shadow.medium.center}\nGitLab link references\n{: .note.text-center}\n\nOnce this merge request is merged, the Jira issue will be automatically closed with a link to the commit that resolved the issue.\n\n![Jira Issue auto closes when GitLab MR merges](https://about.gitlab.com/images/blogimages/atlassianjira/jiraautoclose.png){: .shadow.medium.center}\nJira Issue auto closes when GitLab MR merges\n{: .note.text-center}\n\n## Development Panel Integration Set-Up\n\n### A. 
Jira DVCS configuration\n\nWhen using the Jira DVCS configuration, there are several different configurations you can make that are dependent on how your Jira/GitLab instances are managed.\n\n* If you are using self-managed GitLab, make sure your GitLab instance is accessible by Jira.\n* If you’re connecting to Jira Cloud, ensure your instance is accessible through the internet.\n* If you are using Jira Server, make sure your instance is accessible however your network is set up.\n\n### B. GitLab account configuration for DVCS\n\n* In GitLab, create a new application to allow Jira to connect with your GitLab account.\nWhile signed in to the GitLab account that you want Jira to use to connect to GitLab, click your profile avatar at the top right, and then click Settings > Applications. Use the form to create a new application.\n\n* In the Name field, enter a descriptive name for the integration, such as Jira.\nFor the Redirect URI field, enter https://\u003Cgitlab.example.com>/login/oauth/callback, replacing \u003Cgitlab.example.com> with your GitLab instance domain. For example, if you are using GitLab.com, this would be https://gitlab.com/login/oauth/callback.\nNote: If using a GitLab version earlier than 11.3, the Redirect URI must be https://\u003Cgitlab.example.com>/-/jira/login/oauth/callback. If you want Jira to have access to all projects, GitLab recommends that an administrator create the application.\n\n![Admin Creates Integration](https://about.gitlab.com/images/blogimages/atlassianjira/admincreates.png){: .shadow.medium.center}\nAdmin Creates Integration\n{: .note.text-center}\n\n* Check API in the Scopes section and uncheck any other checkboxes.\n\n* Click Save application. GitLab displays the generated Application ID and Secret values. 
Copy these values, which you will use in Jira.\n\n*Tip: To ensure that regular user account maintenance doesn’t impact your integration, create and use a single-purpose jira user in GitLab.*\n\n## Jira DVCS Connector setup\n\nNote: If you’re using GitLab.com and Jira Cloud, we recommend you use the [GitLab for Jira app](https://docs.gitlab.com/ee/integration/jira/index.html), unless you have a specific need for the DVCS Connector.\n\n* Ensure you have completed the [GitLab configuration](https://docs.gitlab.com/ee/integration/jira/index.html).\n\n![Check api in Applications](https://about.gitlab.com/images/blogimages/atlassianjira/checkapi.png){: .shadow.medium.center}\nCheck api in Applications\n{: .note.text-center}\n\n![Application was created successfully](https://about.gitlab.com/images/blogimages/atlassianjira/applicationsuccessful.png){: .shadow.medium.center}\nApplication was created successfully\n{: .note.text-center}\n\n* If you’re using Jira Server, go to Settings (gear) > Applications > DVCS accounts. If you’re using Jira Cloud, go to Settings (gear) > Products > DVCS accounts.\n\n![Go to DVCS in Settings](https://about.gitlab.com/images/blogimages/atlassianjira/dvcssettings.png){: .shadow.medium.center}\nGo to DVCS in Settings\n{: .note.text-center}\n\n* Click Link GitHub Enterprise account to start creating a new integration. 
(We’re pretending to be GitHub in this integration, until there’s additional platform support in Jira.)\n\n![Click Link to start new integration](https://about.gitlab.com/images/blogimages/atlassianjira/dvcsaccount.png){: .shadow.medium.center}\nClick Link to start new integration\n{: .note.text-center}\n\n* Complete the form:\nSelect GitHub Enterprise for the Host field.\nIn the Team or User Account field, enter the relative path of a top-level GitLab group that you have access to, or the relative path of your personal namespace.\n\n![Add new account](https://about.gitlab.com/images/blogimages/atlassianjira/addnewaccount.png){: .shadow.medium.center}\nAdd new account\n{: .note.text-center}\n\nIn the Host URL field, enter https://\u003Cgitlab.example.com>/, replacing \u003Cgitlab.example.com> with your GitLab instance domain. For example, if you are using GitLab.com, this would be https://gitlab.com/.\n\n*Note: If using a GitLab version earlier than 11.3 the Host URL value should be https://\u003Cgitlab.example.com>/-/jira*\n\nFor the Client ID field, use the Application ID value from the previous section.\n\nFor the Client Secret field, use the Secret value from the previous section.\n\nEnsure that the rest of the checkboxes are checked.\n\n* Click Add to complete and create the integration.\nJira takes up to a few minutes to know about (import behind the scenes) all the commits and branches for all the projects in the GitLab group you specified in the previous step. These are refreshed every 60 minutes.\n\nIn the future, we plan on implementing real-time integration. 
If you need to refresh the data manually, you can do this from the Applications -> DVCS accounts screen where you initially set up the integration:\n\n![Refresh data manually](https://about.gitlab.com/images/blogimages/atlassianjira/refreshdata.png){: .shadow.medium.center}\nRefresh data manually\n{: .note.text-center}\n\nTo connect additional GitLab projects from other GitLab top-level groups (or personal namespaces), repeat the previous steps with additional Jira DVCS accounts.\n\nFor troubleshooting your DVCS connection, go to [GitLab Docs](https://docs.gitlab.com/ee/integration/jira/index.html) for more information.\n\n_In our next blog post we'll look at [Usage](https://docs.gitlab.com/ee/integration/jira_development_panel.html#usage)._\n\nCover image by [Mikołaj Idziak](https://unsplash.com/@mikidz) on [Unsplash](https://unsplash.com/photos/nwjRmbXbLgw).\n{: .note.text-left}\n",[1041,9,977],{"slug":3894,"featured":6,"template":686},"gitlab-jira-integration-selfmanaged","content:en-us:blog:gitlab-jira-integration-selfmanaged.yml","Gitlab Jira Integration Selfmanaged","en-us/blog/gitlab-jira-integration-selfmanaged.yml","en-us/blog/gitlab-jira-integration-selfmanaged",{"_path":3900,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3901,"content":3907,"config":3914,"_id":3916,"_type":14,"title":3917,"_source":16,"_file":3918,"_stem":3919,"_extension":19},"/en-us/blog/gitlab-named-a-leader-in-the-2024-gartner-magic-quadrant-for-devops",{"title":3902,"description":3903,"ogTitle":3902,"ogDescription":3903,"noIndex":6,"ogImage":3904,"ogUrl":3905,"ogSiteName":670,"ogType":671,"canonicalUrls":3905,"schema":3906},"GitLab named 2024 Gartner DevOps Platforms Quadrant leader","GitLab is positioned highest in Ability to Execute and Completeness of Vision, which we believe is recognition of our customers’ success and our continued innovation in the DevOps 
category.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662523/Blog/Hero%20Images/Gartner_DevOps_Blog_Post_Cover_Image_1800x945__2_.png","https://about.gitlab.com/blog/gitlab-named-a-leader-in-the-2024-gartner-magic-quadrant-for-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab named a Leader in the 2024 Gartner Magic Quadrant for DevOps Platforms\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ashley Kramer\"}],\n        \"datePublished\": \"2024-09-05\",\n      }",{"title":3908,"description":3903,"authors":3909,"heroImage":3904,"date":3911,"body":3912,"category":726,"tags":3913},"GitLab named a Leader in the 2024 Gartner Magic Quadrant for DevOps Platforms",[3910],"Ashley Kramer","2024-09-05","DevOps was originally just a concept, a methodology for delivering software faster by bringing traditionally disparate teams together. It was a response to all the issues caused by the separation of those who built software and those who deployed it.\n\nAt GitLab, we iterated on that concept: Instead of stitching together tools to create a complex DevOps toolchain, a [single DevOps platform](https://about.gitlab.com/platform/) would result in tighter collaboration, greater automation, and more scalable and standardized processes.\n\nWe believe that strategy, which focuses on our customers' success, was correct. 
In the second iteration of the [Gartner Magic Quadrant for DevOps Platforms](https://about.gitlab.com/gartner-magic-quadrant/), we are once again named a Leader by Gartner and this time, positioned highest on both axes: Ability to Execute and Completeness of Vision.\n\n![Gartner MQ for DevOps Platforms 2024 image](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674334/Blog/Content%20Images/figure1.png)\n\n> Download the [2024 Gartner® Magic Quadrant™ for DevOps Platforms report](https://about.gitlab.com/de-de/gartner-magic-quadrant/).\n\nToday’s software organizations must contend with increasing security threats, complex compliance requirements, and carefully adopting new technologies such as generative AI. This is in addition to simply delivering on their promises of scalable services and continued innovation to their own customers.\n\nGitLab helps our customers face these challenges and become leaders in their own industries. With our AI-powered DevSecOps platform, they are shifting security left, enabling visibility throughout the development lifecycle, and bringing together all the roles and responsibilities needed to deliver the software that powers our world.\n\n## Furthering the DevOps vision\n\nOur work here isn’t done. We will continue to innovate on the DevOps vision and advance our DevSecOps platform in two ways.\n\nFirst, we want to invite even more teams to collaborate on the same platform, with specific features for those involved in [Agile planning](https://about.gitlab.com/blog/categories/agile-planning/), [data science](https://about.gitlab.com/topics/devops/the-role-of-ai-in-devops/), and [observability and application monitoring](https://docs.gitlab.com/operations/observability/).\n\nSecond, we strive to make our platform adoption and deployment options even more flexible to meet our customers’ diverse needs. 
This includes investing in [GitLab Dedicated](https://about.gitlab.com/dedicated/), our single-tenant, hosted option, so companies in highly regulated industries can have the simplicity of SaaS and the power of all the latest features and capabilities, while adhering to the compliance needs of isolated infrastructure.\n\n## Helping organizations build secure software\n\nBeyond building a better collaboration platform for delivering software, one of the most important things we do at GitLab is help organizations build more secure and compliant software. Our vision here sets us apart, as GitLab integrates [security scanning](https://about.gitlab.com/solutions/security-compliance/) at the point of code commit, not when applications are ready for release. This helps teams catch vulnerabilities sooner, leading to faster release cycles. GitLab also makes compliance easy with policy guardrails and automatically generating [a software bill of materials](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/).\n\nWe know our customers face more security threats as their own software surface attack area increases. This is why, in the next 12 months, we plan to continue improving our SAST scanners, add additional policy controls, and build [an upcoming native secrets manager](https://about.gitlab.com/blog/gitlab-native-secrets-manager-to-give-software-supply-chain-security-a-boost/).\n\n## Leading with AI throughout the SDLC\n\nOur vision is to also be a leader in AI – both in enabling our customers to build innovative software with AI, and also to do it with privacy-first AI technology. AI represents a generational leap forward with an incredible amount of opportunity when integrated throughout the software development lifecycle. As we innovate, we are doing so responsibly. 
We’ve heard our customers’ concerns loud and clear: They want [AI with guardrails](https://about.gitlab.com/the-source/ai/velocity-with-guardrails-ai-automation/), [AI that’s transparent](https://about.gitlab.com/ai-transparency-center/), and AI that respects their code and intellectual property.\n\nWe are committed to building [GitLab Duo](https://about.gitlab.com/gitlab-duo/), a suite of AI-powered features for our DevSecOps platform that are all of these: comprehensive, privacy-first, and built to support the entire software development lifecycle.\n\nWe believe this commitment and our GitLab Duo features are why, recently, [Gartner® also named us a Leader in its first Magic Quadrant™ for AI Code Assistants](https://about.gitlab.com/blog/gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants/).\n\nWe are honored by this recognition and see it as a sign to continue listening to you  –  our customers – because that is what drives our vision, product roadmap, and commitment in delivering the best DevSecOps platform.\n\n> Download the [2024 Gartner® Magic Quadrant™ for DevOps Platforms report](https://about.gitlab.com/gartner-magic-quadrant/).\n\n***Source: Gartner, Magic Quadrant for DevOps Platforms, Keith Mann, Thomas Murphy, Bill Holz, George Spafford, August 2024***\n\n***GARTNER is a registered trademark and service mark of Gartner, Inc. and/or its affiliates in the U.S. and internationally, and MAGIC QUADRANT is a\nregistered trademark of Gartner, Inc. and/or its affiliates and are used herein with permission. All rights reserved.***\n\n***Gartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s research organization and should not be construed as statements of fact. 
Gartner disclaims all warranties, expressed or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose.***\n\n***This graphic was published by Gartner Inc. as part of a larger report and should be evaluated in the context of the entire document. The Gartner document is available upon request from Gartner.***",[726,2981,479,9,2243],{"slug":3915,"featured":91,"template":686},"gitlab-named-a-leader-in-the-2024-gartner-magic-quadrant-for-devops","content:en-us:blog:gitlab-named-a-leader-in-the-2024-gartner-magic-quadrant-for-devops.yml","Gitlab Named A Leader In The 2024 Gartner Magic Quadrant For Devops","en-us/blog/gitlab-named-a-leader-in-the-2024-gartner-magic-quadrant-for-devops.yml","en-us/blog/gitlab-named-a-leader-in-the-2024-gartner-magic-quadrant-for-devops",{"_path":3921,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3922,"content":3928,"config":3933,"_id":3935,"_type":14,"title":3936,"_source":16,"_file":3937,"_stem":3938,"_extension":19},"/en-us/blog/gitlab-names-joel-krooswyk-as-its-first-federal-cto",{"title":3923,"description":3924,"ogTitle":3923,"ogDescription":3924,"noIndex":6,"ogImage":3925,"ogUrl":3926,"ogSiteName":670,"ogType":671,"canonicalUrls":3926,"schema":3927},"GitLab names Joel Krooswyk as its first Federal CTO","New role reaffirms company’s commitment to the public sector.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669378/Blog/Hero%20Images/bab_cover_image.jpg","https://about.gitlab.com/blog/gitlab-names-joel-krooswyk-as-its-first-federal-cto","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab names Joel Krooswyk as its first Federal CTO\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-11-14\",\n      
}",{"title":3923,"description":3924,"authors":3929,"heroImage":3925,"date":3930,"body":3931,"category":726,"tags":3932},[745],"2022-11-14","[Gitlab Federal](/solutions/public-sector/), LLC, provider of The One DevOps Platform for the public sector, announced that [Joel Krooswyk](https://gitlab.com/jkrooswyk), former Senior Manager of Solutions Architecture, has been named Federal CTO.\n\n![Photo of Joel Krooswyk](https://about.gitlab.com/images/blogimages/krooswyk.jpg){: .shadow.small.left.wrap-text}\n\n“The creation of the Federal CTO position recognizes the importance of the public sector in the world of DevSecOps. Joel’s experience allows him to provide expert insight to government agencies as they seek guidance on DevOps practices, building software factories, meeting compliance requirements and more,” says [Bob Stevens](https://gitlab.com/bstevens1), Vice President of Public Sector at GitLab. “We are excited to reaffirm our commitment to the public sector through this new role and Joel’s appointment.”\n\nAs Federal CTO, Krooswyk will ensure that GitLab has a voice in developing key [DevSecOps](/topics/devsecops/) practices coming from standards bodies, Congressional committees, industry working groups, and other influential organizations. He also will assist GitLab in continuing to build and strengthen relationships with federal DevSecOps professionals to help them streamline and secure their software development environments with a DevSecOps platform.\n\n“This is an exciting time in DevSecOps, and the federal government is on the leading edge, helping navigate such challenging issues as software supply chain security and regulatory compliance. I am thrilled to step into this new role and to be GitLab’s voice at the table, ensuring that our software development and security technology and practices are reflected in efforts across the public sector,” Krooswyk says.\n\nKrooswyk has actively been involved in GitLab’s growth since 2017. 
He has 25 years of experience in the software industry. His experience spans development, QA, product management, portfolio planning, and technical sales, and he has written a half million lines of unique code throughout his career. Joel holds a B.S. in Electrical Engineering from Purdue University as well as multiple industry certifications.",[726,9,728,184],{"slug":3934,"featured":6,"template":686},"gitlab-names-joel-krooswyk-as-its-first-federal-cto","content:en-us:blog:gitlab-names-joel-krooswyk-as-its-first-federal-cto.yml","Gitlab Names Joel Krooswyk As Its First Federal Cto","en-us/blog/gitlab-names-joel-krooswyk-as-its-first-federal-cto.yml","en-us/blog/gitlab-names-joel-krooswyk-as-its-first-federal-cto",{"_path":3940,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3941,"content":3947,"config":3954,"_id":3956,"_type":14,"title":3957,"_source":16,"_file":3958,"_stem":3959,"_extension":19},"/en-us/blog/gitlab-on-vmware-cloud-marketplace",{"title":3942,"description":3943,"ogTitle":3942,"ogDescription":3943,"noIndex":6,"ogImage":3944,"ogUrl":3945,"ogSiteName":670,"ogType":671,"canonicalUrls":3945,"schema":3946},"GitLab for Cloud Native Transformation on VMware Marketplace","Guest authors from VMware share how to accelerate your software delivery process in just a few clicks with Bitnami and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680841/Blog/Hero%20Images/bitnami-gitlab.png","https://about.gitlab.com/blog/gitlab-on-vmware-cloud-marketplace","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Enterprise Edition now available for VMware Cloud Marketplace users\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Raquel Campuzano\"}],\n        \"datePublished\": \"2019-10-11\",\n      }",{"title":3948,"description":3943,"authors":3949,"heroImage":3944,"date":3951,"body":3952,"category":299,"tags":3953},"GitLab Enterprise Edition now 
available for VMware Cloud Marketplace users",[3950],"Raquel Campuzano","2019-10-11","\n\nHave you ever tried to choose from an extensive list of developer tools and wondered what you should do next? You’re not alone. There are hundreds of solutions to choose from, which can make it challenging to select the right solution and deploy.\n\nNow, GitLab and Bitnami have partnered to offer VMware users [GitLab](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9) in the VMware Cloud Marketplace. This version package is free, fully functional, and easy to [upgrade to an enterprise plan](https://docs.bitnami.com/vmware-marketplace/apps/gitlab-ee/get-started/license/).\n\n### Reduce costs and avoid security risks\nAs the industry leader in application packaging, Bitnami helped GitLab create an easy, click-to-deploy, open source solution. The GitLab Enterprise Edition (CORE) Virtual Appliance certified by Bitnami is an up-to-date and secure image that includes the latest versions of the application, its components, and the most recent security fixes. You can run GitLab with confidence; Bitnami’s automated pipeline and tools for building and testing applications ensure this application can run on any platform without issues. If you experience any problems deploying the solution, you can contact the [Bitnami Support team](https://community.bitnami.com/c/gitlab) with your questions.\n\n### Run on VMware infrastructure in a few clicks\nTo make GitLab available in the [VMware Cloud Marketplace](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9), GitLab placed its trust in Bitnami’s expertise in packaging. 
GitLab users now have the ability to run the latest version on their VMware infrastructure in a few clicks.\n\n### Some of the key benefits of GitLab's marketplace listing:\n* GitLab includes a built-in container registry and Kubernetes integration, enabling you to quickly create a [continuous integration (CI)](/solutions/continuous-integration/) pipeline with Kubernetes. Learn more about [creating a CI/CD pipeline with GitLab and Kubernetes](https://docs.bitnami.com/tutorials/create-ci-cd-pipeline-gitlab-kubernetes/).\n* By deploying GitLab on a VMware cloud server, you can add a budget- and resource-checking stage to your pipeline. This allows you to implement best practices into your continuous deployment (CD) process and control the consumption and costs of your application deployments.\n* Premium features such as code quality and performance testing, static and dynamic application security testing, package dependency analysis, and automated tests for vulnerabilities enable you to identify and remediate issues and security breaches from development to monitoring stages. Learn more about [building misconfiguration and vulnerability checks into your CI/CD pipeline to achieve continuous security](https://thenewstack.io/how-continuous-security-can-solve-the-cloud-protection-conundrum/).\n\n### How do you get started? We’ll show you how\nIn order to upgrade your GitLab Core version to enjoy the Enterprise Edition features, take the following steps:\n\n1) First log into the [VMware Cloud Marketplace](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9) and browse for the “GitLab Enterprise (CORE) Virtual Appliance” solution.\n\n2) Then click to view the details. 
Note: The GitLab Enterprise (CORE) Virtual Appliance is available in the [VMware Cloud Marketplace](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9) in two deployment options: VMware Cloud on AWS (VMC) or vCloud Director (VCD).\n\n![GitLab is available in the VMware Cloud Marketplace in two deployment options: VMware Cloud on AWS (VMC) or vCloud Director (VCD)](https://about.gitlab.com/images/blogimages/gitlabonvmware1.png){: .shadow.medium.center}\n\n3) To deploy the application both on VMC or VCD, you need to first subscribe to the image, as shown below:\n\n![To deploy the application both on VMC or VCD, you need to first subscribe, as shown below](https://about.gitlab.com/images/blogimages/subscribetovmwmarketplace.png){: .shadow.medium.center}\n\n4) Then, select the platform where you wish to deploy it, as shown below:\n\n![After subscribing, select the VMC or VCD platform where you wish to deploy](https://about.gitlab.com/images/blogimages/deploytovmwplatform.png){: .shadow.medium.center}\n\n5) Depending on the platform you select, you will be redirected to the vSphere Client or vCloud Director platform. Follow these instructions to launch a [GitLab Enterprise (CORE) Virtual Appliance using the vSphere Client](https://docs.bitnami.com/vmware-marketplace/apps/gitlab-ee/get-started/get-started-vmware-cloud/) or as a [vApp from VMware vCloud Director](https://docs.bitnami.com/vmware-marketplace/get-started-vcloud-director/).\n\n6) When you deploy the [GitLab Enterprise (CORE) Virtual Appliance certified by Bitnami](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9), you get the free and fully functional [Core version of GitLab](/pricing/#self-managed), which is easily upgradable to Starter, Premium, or Ultimate. To upgrade, sign into the application, navigate to the “Admin Area,” and then select the “License” menu option. 
As you can see in the image below, you now have the option to either upload your `.gitlab-license` file or start a [free trial](/free-trial/).\n\nNote: If you start a free trial, you will be able to try all the paid features for 30 days. After that time, your server will revert to Core features.\n{: .alert .alert-info}\n\n![To upgrade, sign into the application, navigate to the “Admin Area,” and then select the “License” menu option](https://about.gitlab.com/images/blogimages/vmwmarketplacefreetrial.png){: .shadow.medium.center}\n\n\n7) Once you activate your license, paid features will be enabled as shown below and you can start deploying with confidence.\n\n![Once you activate your license, paid features will be enabled](https://about.gitlab.com/images/blogimages/vmwpremiumfeatures.png){: .shadow.medium.center}\n\n## Conclusion\n\nWhat used to be a complex task is now just a few clicks, without compromising your budget and your security. Enjoy all the advantages of the GitLab in the VMware Cloud Marketplace and accelerate your software delivery process by leveraging the simplicity of the Bitnami experience.\n\n[Get started now](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9). If you have any questions, feel free to reach out to the Bitnami Support team!\n\n### About the guest author\n\nRaquel Campuzano is a Content Marketing Specialist at Bitnami, now part of VMware. She is in charge of managing the creation of technical content that allows developers to deploy awesome software everywhere. Raquel was part of the Bitnami team as technical writer. Her know-how creating tutorials, product documentation, and videos gave her the ability to identify in which stage of developer’s journey the user experience can be improved.\n\nPrevious to Bitnami, she led the communication and marketing strategy for Redborder (cybersecurity) and Oklan (network and hosting services). 
She is also a member of Ping a Programadoras, a non-profit organisation focused on promoting women’s inclusion in programming and software development.\n",[109,1041,9,231],{"slug":3955,"featured":6,"template":686},"gitlab-on-vmware-cloud-marketplace","content:en-us:blog:gitlab-on-vmware-cloud-marketplace.yml","Gitlab On Vmware Cloud Marketplace","en-us/blog/gitlab-on-vmware-cloud-marketplace.yml","en-us/blog/gitlab-on-vmware-cloud-marketplace",{"_path":3961,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3962,"content":3968,"config":3974,"_id":3976,"_type":14,"title":3977,"_source":16,"_file":3978,"_stem":3979,"_extension":19},"/en-us/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform",{"title":3963,"description":3964,"ogTitle":3963,"ogDescription":3964,"noIndex":6,"ogImage":3965,"ogUrl":3966,"ogSiteName":670,"ogType":671,"canonicalUrls":3966,"schema":3967},"GitLab provides small business with a professional, mature DevOps platform","Blonk had a small team but a big need for professional software development. Here's how GitLab helped.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668272/Blog/Hero%20Images/blonklogo.png","https://about.gitlab.com/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab provides small business with a professional, mature DevOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Esther Shein\"}],\n        \"datePublished\": \"2022-05-19\",\n      }",{"title":3963,"description":3964,"authors":3969,"heroImage":3965,"date":3971,"body":3972,"category":769,"tags":3973},[3970],"Esther Shein","2022-05-19","\nBlonk is an international leader in the field of environmental and sustainability research in the agri-food sector. 
But as a small business without a QA team or a security team, the challenge was figuring out how to deliver professional software with only a few developers.\n\n[Blonk](https://blonksustainability.nl) used an external company to help set up what Bart Durlinger, product development manager, and software developer Pieter van de Vijver envisioned as its platform at the time. “They set up an environment on Amazon, a separate built server, a separate repository, and then some scripts in between to link it all together,” Durlinger recalls. “But when we decided to take more control, that was just too complex. We had too many different parts in many different places. We didn't have the capacity at the time to really oversee how this should all work together.”\n\nThat's when the Blonk team started looking for platforms that offered a more integrated approach, with project management, CI/CD, repository, and version control features all in one place.\n\n## Mature, with a modern vision of software development\n\nBlonk turned to GitLab after finding that the platform “had a lot of the things you need to have a professional delivery pipeline integrated into one solution,\" says Durlinger. At the time, the consultancy was using GitHub, which was more expensive, he says.\n\nWhen Blonk started with GitLab, the platform was free, which was a big factor in its selection, van de Vijver says. “But it was also an up-and-coming startup with a vision of that CI/CD integration built into how you envisioned the whole service itself,\" he says. 
“GitHub was more of a repository that might provide you with those things, but it required more manual setup.”\n \nBlonk liked that GitLab was a mature and stable solution “but still new enough to have a vision of how software is approached nowadays with easy setup and an integrated pipeline by default, and useful branching strategies by which you could support a multi-level, multi-stage deployment process easily,\" Van de Vijver says.\n\nAt the time Van de Vijver was the only one at Blonk with a background as a software developer, and another bonus was his familiarity with all the tools in GitLab. “By using GitLab, we could hit the ground running, and keep the scale small. You don't have to worry about all kinds of CI/CD operations and integrations and the configuration of that but use it just out of the box,” he says.\n\n## How Blonk is utilizing GitLab today\n\nCurrently, Blonk has 38 GitLab premium licenses, about half of which are used by software developers. The rest are used by data scientists, consultants, project managers, and others, so there are different ways the platform is utilized within the company; that also means there are different levels of software literacy but that hasn’t been an issue. The software development team has been onboarding very junior developers over the past couple of months, and “never have I had questions of how to do stuff in GitLab, because the platform is very intuitive,” Durlinger says.\n\nThe software development team has been integrated further into the core business, which also fits nicely with GitLab’s services, including the milestones Blonk uses as well as its repositories and project management strategies. “Also data scientists and methodology developers are now using GitLab projects for the project planning sometimes,” Durlinger notes.\n\nGitLab provided Blonk with a professional software environment for their developers. 
GitLab also lets the team use pre-built Docker images and a private Python package repository in their CI/CD pipelines, which means faster build times and easy integration, according to Durlinger. “That's a huge change because then we can distribute the work over multiple teams that can work independently on projects,” he says.\n\nThe platform’s automation features have also improved operational efficiency. “We don't need to communicate with external parties, or do any manual steps if we make code changes. We now are in control of managing our software and infrastructure deployment via CDK and gitlab-ci scripts, which makes it fully automated,” Durlinger explains.\n\nIn the project planning stage, Blonk is using GitLab issue templates to define issues, “and that also has really improved the quality of how we define issues to start with,\" Durlinger adds. Blonk has reaped huge benefits from the Agile capabilities of GitLab to plan, manage and monitor their workflows.  \n\nBlonk now has improved transparency and collaboration amongst their teams, and they are using the GitLab Wiki to build an internal knowledge base to optimize productivity and accelerate new developer onboarding.\n\nGitLab has supported the scaling of the developer team from 2 to now 16 developers, going from a single team to 3 software teams and a data science team, all using the One DevOps Platform bringing a much needed single DevOps workflow. Blonk is using the package registry, Docker integration with GitLab, and each team now deploys microservices on AWS. Teams are facilitated via GitLab with enhanced communication and a robust feedback loop.  \n\nProbably the biggest selling point of the platform is that it offers an integrated environment of all solutions related to code management and deployment – from container services to package registry services – everything Blonk wants to use in a pipeline and be able to manage privately, according to Durlinger. 
The fact that Blonk no longer has to use multiple tools in an ad hoc manner is another benefit. \n\n“What’s really nice is that our non-code artifacts live together with the code,\" Durlinger notes. “Our designs, methodology documents, and prototypes developed by data scientists can all be part of our Gitlab projects. This has improved workflow throughout the organization,” he says.\n\n## Looking ahead\n\n“As Blonk continues its business transformation, GitLab is helping the company maintain its reputation as a reliable and honest company,” Durlinger says. GitLab has added value to their employer brand and makes them more attractive to new developers to join: “It demonstrates that we have a professional environment for software engineers.”\n\nBlonk’s goal is to improve sustainability performance analysis, and ensure that the tools they are building have the same integrity and quality. “GitLab enables us to do this by having a professional project creation pipeline in place,\" Durlinger says.\n",[9,793,109],{"slug":3975,"featured":6,"template":686},"gitlab-provides-small-business-with-a-professional-mature-devops-platform","content:en-us:blog:gitlab-provides-small-business-with-a-professional-mature-devops-platform.yml","Gitlab Provides Small Business With A Professional Mature Devops Platform","en-us/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform.yml","en-us/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform",{"_path":3981,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3982,"content":3987,"config":3994,"_id":3996,"_type":14,"title":3997,"_source":16,"_file":3998,"_stem":3999,"_extension":19},"/en-us/blog/gitlab-runner-update-required-to-use-auto-devops-and-sast",{"title":3983,"description":3984,"ogTitle":3983,"ogDescription":3984,"noIndex":6,"ogImage":1861,"ogUrl":3985,"ogSiteName":670,"ogType":671,"canonicalUrls":3985,"schema":3986},"GitLab Runner update required to use SAST in Auto DevOps","Make 
sure you upgrade GitLab Runner to 11.5+ to continue using SAST in Auto DevOps.","https://about.gitlab.com/blog/gitlab-runner-update-required-to-use-auto-devops-and-sast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Runner update required to use SAST in Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fabio Busatto\"}],\n        \"datePublished\": \"2018-12-06\",\n      }",{"title":3983,"description":3984,"authors":3988,"heroImage":1861,"date":3990,"body":3991,"category":791,"tags":3992},[3989],"Fabio Busatto","2018-12-06","\n\nWe are introducing a major change for the [SAST] job definition for [Auto DevOps] with **GitLab 11.6**, shipping Dec. 22.\nAs a result, SAST jobs will fail after the upgrade to GitLab 11.6 if they are picked up by a version of [GitLab Runner]\nprior to 11.5. The jobs will fail, but they will not block pipelines. However, you won't see results\nfor SAST in the merge request or at the pipeline level anymore.\n\nThe same change will happen for [Dependency Scanning], [Container Scanning], [DAST], and [License Management] in future releases.\n\n## Why did this happen?\n\nThe [new job definition] uses the [`reports` syntax], which is necessary to show SAST results in the [Group Security Dashboard].\nUnfortunately, this syntax is not supported by GitLab Runner prior to 11.5.\n\n## Who is affected?\n\nYou are affected by this change if you meet **all** the requirements in the following list:\n1. You are using Auto DevOps **AND**\n1. you have at least one GitLab Runner 11.4 or older set up for your projects **AND**\n1. you are interested in security reports.\n\n## Who is not affected?\n\nYou are **not** affected by this change if you meet **at least one** of the requirements in the following list:\n1. You are not using Auto DevOps **OR**\n1. you are using only GitLab Runner 11.5 or newer **OR**\n1. 
you are using only shared runners on GitLab.com (we already upgraded them) **OR**\n1. you are not interested in security reports.\n\n## How to solve the problem\n\nIf you are not affected by the change, you don't need to take any action.\n\nIf you are affected, you should upgrade your GitLab Runners to version 11.5 or newer as soon as possible.\nIf you don't, you will not have new SAST reports until you do upgrade. If you upgrade your runners later, SAST will\nstart to work again correctly.\n\n## Which is the expected timeline?\n\nGitLab 11.6 will be released on **Dec. 22**.  This change may also be shipped in an early release\ncandidate (RC) version.\n\nIf you are using a **self-managed** GitLab instance, and you don't install RC versions, you will be affected when\nyou'll upgrade to GitLab 11.6.\n\nIf you are using **GitLab.com**, you will be affected as soon as the RC version with the change will be deployed.\n\nFeel free to reach out to us with any further questions!\n\n[SAST]: https://docs.gitlab.com/ee/user/application_security/sast/\n[Auto DevOps]: https://docs.gitlab.com/ee/topics/autodevops/\n[new job definition]: https://docs.gitlab.com/ee/user/application_security/sast/\n[`reports` syntax]: https://docs.gitlab.com/ee/ci/yaml/#artifactsreportssast-ultimate\n[Group Security Dashboard]: https://docs.gitlab.com/ee/user/application_security/security_dashboard/\n[GitLab Runner]: https://docs.gitlab.com/runner/\n[Dependency Scanning]: https://docs.gitlab.com/ee/user/application_security/dependency_scanning/\n[Container Scanning]: https://docs.gitlab.com/ee/user/application_security/container_scanning/\n[DAST]: https://docs.gitlab.com/ee/user/application_security/dast/\n[License Management]: 
https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html\n",[976,9,916,3993,875],"releases",{"slug":3995,"featured":6,"template":686},"gitlab-runner-update-required-to-use-auto-devops-and-sast","content:en-us:blog:gitlab-runner-update-required-to-use-auto-devops-and-sast.yml","Gitlab Runner Update Required To Use Auto Devops And Sast","en-us/blog/gitlab-runner-update-required-to-use-auto-devops-and-sast.yml","en-us/blog/gitlab-runner-update-required-to-use-auto-devops-and-sast",{"_path":4001,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4002,"content":4008,"config":4013,"_id":4015,"_type":14,"title":4016,"_source":16,"_file":4017,"_stem":4018,"_extension":19},"/en-us/blog/gitlab-serverless-with-cloudrun-for-anthos",{"title":4003,"description":4004,"ogTitle":4003,"ogDescription":4004,"noIndex":6,"ogImage":4005,"ogUrl":4006,"ogSiteName":670,"ogType":671,"canonicalUrls":4006,"schema":4007},"Announcing GitLab Serverless deploying to Cloud Run for Anthos","Discover how we're making it easier to deploy serverless workloads on-premise with Anthos.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666851/Blog/Hero%20Images/gitlab-serverless-blog.png","https://about.gitlab.com/blog/gitlab-serverless-with-cloudrun-for-anthos","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing GitLab Serverless deploying to Cloud Run for Anthos\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mayank Tahilramani\"}],\n        \"datePublished\": \"2019-11-19\",\n      }",{"title":4003,"description":4004,"authors":4009,"heroImage":4005,"date":2200,"body":4011,"category":1318,"tags":4012},[4010],"Mayank Tahilramani","\nThis week at Google Cloud Next ’19 UK, Google Cloud grew its Anthos product portfolio with the addition of Cloud Run for Anthos running on-prem. 
I’m excited to share that GitLab has been collaborating with Google Cloud product teams to support this launch and enable customers with CI/CD and [GitLab Serverless](/topics/serverless/) capabilities for quicker and easier adoption of serverless solutions. In the spirit of our partnership, our support for [Cloud Run for Anthos](https://cloud.google.com/run) is a continuation of our collaboration [announced earlier this year at Google Cloud Next ’19 in San Francisco](/blog/running-a-consistent-serverless-platform/), where we showed how you can deploy a serverless function to Cloud Run using the same developer workflow you’re already familiar with in GitLab. Now, we’re looking to bring that same UX and workflow consistency to Cloud Run deployments on Anthos running on-premise. Overall, together, GitLab and Google Cloud are aiming to lower the barrier of adoption for customers looking to architect scalable, cloud native solutions. \n\nHowever, when discussing cloud native, oftentimes ‘public cloud infrastructure’ comes to mind. But when I think of cloud native, I think of the various, modern ways of architecting scalable solutions, backed by managed services to make operations more convenient. Until very recently, infrastructure-centric managed services like Google Kubernetes Engine (GKE), Cloud Run, StackDriver, etc. have been traditionally associated with workloads running within cloud data centers. Given the recent announcements of [Google Cloud Anthos](https://cloud.google.com/blog/products/serverless/knative-based-cloud-run-services-are-ga), Google is clearly broadening the boundaries of cloud native across hybrid and heterogeneous environments, including customer data centers. 
As the infrastructure landscape diversifies, as application development intertwines with abstraction layers of managed services, and as workload flexibility becomes inherent with microservice containerization, the one thing you can rely on staying consistent is GitLab’s developer workflow to supplement all the above. In the context of all things [serverless](/topics/serverless/), let's take a closer look at what’s available today, what we’re still working on, and what that means for our users.\n\n## What’s available today\n\nGitLab serves as a single application for all of [DevOps](/topics/devops/), which includes building, deploying, and managing serverless applications. GitLab serverless enables developers to focus on writing application code without having to worry about Kubernetes or Knative YAML configuration. GitLab provides templates allowing developers to easily build and deploy Knative services that can be deployed to Cloud Run. Here is a [quick video walkthrough on the anatomy of a serverless project hosted in GitLab and deployed to Knative](https://youtu.be/IIM8JWhAbNk?t=210). With Google, you have a few options on how to leverage Cloud Run as a deployment target for GitLab CI/CD. As of this week, you can run Cloud Run in three different flavors: \n\n1. **Cloud Run**: This is a fully managed cloud service powered by Knative for serverless apps. GitLab supports deploying to Cloud Run and the full CI/CD workflow to leverage GitLab Runners to build and test functions. GitLab takes in the [`serverless.yml`](https://docs.gitlab.com/ee/update/removals.html) file within the root of your source code repository to define and deploy to Cloud Run.  \n\n2. **Cloud Run for Anthos running on Google Cloud**: This is a managed deployment of Knative on Anthos GKE clusters running on Google Cloud Platform. This enables you to install a managed Cloud Run deployment on top of your own Kubernetes cluster. 
Similar to above, GitLab also supports deploying to Cloud Run via the full CI/CD workflow, but as of right now, the highest version of Knative supported by GitLab is 0.7. Latest version support for Knative is coming in [GitLab 12.6](/releases/) on Dec. 22, 2019.  \n\n3. **Cloud Run for Anthos running on-premise**: Similar to above, this flavor of Cloud Run enables users to run a managed Cloud Run deployment on top of Anthos GKE On-Prem in your own data center. Currently, Knative v.0.9 is deployed in GKE-OP clusters. GitLab is soon to release support for Knative v0.9 and users can track the progress of this work in [this open issue](https://gitlab.com/gitlab-org/gitlabktl/issues/55) today. If you like what we’re working on, stop by and give us a thumbs up for feedback. So far, internal testing has been very positive and we look forward to formally supporting Cloud Run for Anthos running on-premise in the coming months/releases. The user experience will be almost identical to the prior two use cases listed above as you would expect.\n\n## Where to get started\n\nIf you’re interested in getting started with some sample code, check out our [documentation](https://docs.gitlab.com/ee/update/removals.html) and [sample app project](https://gitlab.com/knative-examples/functions) for reference. Additionally, [here is a walkthrough of deploying a demo app to Cloud Run from GitLab](https://youtu.be/lb_bRRAgEyc?t=1103). 
If you’re looking to get started with Serverless on Google Cloud Platform, [sign up for GitLab.com here](https://gitlab.com/users/sign_up) and then [sign up for $200 additional free GCP credits](https://cloud.google.com/partners/partnercredit/?PCN=a0n60000006Vpz4AAC).\n",[109,1041,9,231,813],{"slug":4014,"featured":6,"template":686},"gitlab-serverless-with-cloudrun-for-anthos","content:en-us:blog:gitlab-serverless-with-cloudrun-for-anthos.yml","Gitlab Serverless With Cloudrun For Anthos","en-us/blog/gitlab-serverless-with-cloudrun-for-anthos.yml","en-us/blog/gitlab-serverless-with-cloudrun-for-anthos",{"_path":4020,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4021,"content":4026,"config":4031,"_id":4033,"_type":14,"title":4034,"_source":16,"_file":4035,"_stem":4036,"_extension":19},"/en-us/blog/gitlab-supply-chain-security",{"title":4022,"description":4023,"ogTitle":4022,"ogDescription":4023,"noIndex":6,"ogImage":2756,"ogUrl":4024,"ogSiteName":670,"ogType":671,"canonicalUrls":4024,"schema":4025},"Introducing GitLab’s supply chain security direction and landscape","Learn about software supply chain security at GitLab.","https://about.gitlab.com/blog/gitlab-supply-chain-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing GitLab’s supply chain security direction and landscape\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam White\"}],\n        \"datePublished\": \"2022-02-15\",\n      }",{"title":4022,"description":4023,"authors":4027,"heroImage":2756,"date":2317,"body":4029,"category":299,"tags":4030},[4028],"Sam White","\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in the blog post and linked pages are subject to change or delay. The development, release, and timing of products, features, or functionality remain at the sole discretion of GitLab, Inc._ \n\nWe would like to introduce you to our software supply chain security [direction](/direction/supply-chain/) and landscape.\n\nAn emerging concern in the software development space is being able to document the entire supply chain and development progress by creating a chain of custody starting from code creation, build, test, package, and going through deployment. \n\nGitLab's software supply chain security (SSCS) vision includes everything needed to securely deliver and run software with a high degree of confidence that not only your software, but also its surrounding cloud-native infrastructure, has not been compromised. \n\nIn the long-term, our strategy is to become a complete provider for all aspects of SSCS. Providing all of these aspects within a single application not only supports GitLab's broader Single Application Strategy but also provides numerous tangible benefits for users.\n\nAmong other things, using a single application:\n\n1. Minimizes the number of different tools that need to be hardened and monitored.\n1. Reduces the number of potential points of security failure as data is transferred between various tools.\n1. Enables seamless interoperability.\n1. Simplifies visibility and traceability for audits.\n\n## GitLab SSCS Framework\n\nGitLab has put together a framework describing the various aspects that are required to accomplish this based on feedback from customers and inspiration from common standards (such as [SLSA](https://slsa.dev/)), as well as thought leadership from industry analysts. 
Please note, however, that this framework is not necessarily representative of any other entity's opinion or perspective on the SSCS space.\n\nWe believe that there are five main aspects to consider when providing for a secure, end-to-end software supply chain.\n\n1. **Source** - includes the controls needed to be confident that both internal and external source code is safe from vulnerabilities and has not been compromised in any way.\n1. **Build** - includes rigorous requirements for the security and isolation of build environments as well as the automatic generation of provenance.\n1. **Consumption** - includes the ability to validate authenticity and source of any executed binaries. Supports requirements for securing the underlying host infrastructure itself.\n1. **Management Process** - spans across all other aspects of SSCS and includes both the tools and processes necessary to provide for ongoing visibility into SSCS continuous compliance requirements.\n1. **Tool Security** - spans across all other aspects of SSCS and includes the adoption of best practices for managing the security of the underlying tools themselves.\n\nYou can learn more about the SSCS framework in our [direction](/direction/supply-chain/).\n\n### GitLab helps keep your software supply chain secure\n\nGitLab has [numerous capabilities that support continuous compliance](/blog/gitlabs-newest-continuous-compliance-features-bolster-software/) and a secure software supply chain. 
Our newly released [“Guide to Software Supply Chain Security”](https://page.gitlab.com/resources-ebook-software-supply-chain-security.html) explains the urgency of protecting the supply chain now and also describes how this can be done while using GitLab.\n\nGitLab is a platform that [plays well with others](/handbook/product/gitlab-the-product/#plays-well-with-others) and can work together with other best-in-class security tools to provide complete end-to-end chain of custody throughout the development and deployment process. GitLab's vision is to partner closely with leading technologies in this space to provide an integrated, turnkey experience for end users.\n\n### What’s next\n\nAs a single DevOps platform, there are many opportunities to rise to the challenge of creating transparency around software components and artifacts. We welcome feedback on our [current position and vision](/direction/supply-chain/#current-position-and-vision) for the long-term direction of GitLab in SSCS. \n\nHere are a few of our near-term projects:\n\n- GitLab's [Runner Core](/direction/verify/runner_core/#strategic-priorities",[875,9,916],{"slug":4032,"featured":6,"template":686},"gitlab-supply-chain-security","content:en-us:blog:gitlab-supply-chain-security.yml","Gitlab Supply Chain Security","en-us/blog/gitlab-supply-chain-security.yml","en-us/blog/gitlab-supply-chain-security",{"_path":4038,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4039,"content":4045,"config":4051,"_id":4053,"_type":14,"title":4054,"_source":16,"_file":4055,"_stem":4056,"_extension":19},"/en-us/blog/gitlab-taught-in-korean-uni",{"title":4040,"description":4041,"ogTitle":4040,"ogDescription":4041,"noIndex":6,"ogImage":4042,"ogUrl":4043,"ogSiteName":670,"ogType":671,"canonicalUrls":4043,"schema":4044},"Schooled in GitLab: Teaching our handbook at a South Korean university","Students at Hankuk University of Foreign Studies tackled our handbook. 
The students' favorite topics were compensation and remote work.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673044/Blog/Hero%20Images/books-internship-post.jpg","https://about.gitlab.com/blog/gitlab-taught-in-korean-uni","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Schooled in GitLab: Teaching our handbook at a South Korean university\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Guenjun Yoo\"}],\n        \"datePublished\": \"2020-01-29\",\n      }",{"title":4040,"description":4041,"authors":4046,"heroImage":4042,"date":4048,"body":4049,"category":1318,"tags":4050},[4047],"Guenjun Yoo","2020-01-29","\nBusiness students at [Hankuk University of Foreign Studies](http://mis.hufs.ac.kr/) in Seoul, South Korea are studying the GitLab handbook and business model. The students are enthusiastic about GitLab and its story, says lecturer SanJoon Song in an email interview, but there was one problem: Our 3,000+ page handbook is a lot to swallow in one semester.\n\nSo Song had the class divide the handbook into 15 different categories, which different groups of students researched over the course of the semester. At the end of the term, the groups presented a summary of their category to the class.\n\n“Many engineers in Korea said that the GitLab handbook is good to read before starting up a business,” says Song. “However, there is a lot of reading in the handbook; too many pages for me.”\n\nThe level of transparency in the handbook was a revelation to Song and his students.\n\n“We didn't study [the handbook] only to focus on the content itself, but we tried to understand and share about the context of handbook; what conventions GitLab has and what protocols GitLab is trying to develop with its employees by this handbook,” says Song. 
“In Korea, this is very unusual to share such details of company goals and protocols with entire employees by handbook and for me, this approach is very new and fresh.”\n\n## Inside information\n\nSong was very surprised by how much “insider” information is available in our handbook and says he’s particularly amazed by the detailed explanations of what to do if things go wrong.\n\nOn the other hand, his students were most impressed by the details on [compensation](/handbook/total-rewards/compensation/compensation-calculator/calculator/) and incentives in the handbook, followed closely by the idea of remote work.\n\n“Personally I liked the concept of [‘accept mistakes’](https://handbook.gitlab.com/handbook/values/#accept-mistakes) in the efficiency section,” says Song. “We also talked a lot about GitLab’s [six values](https://handbook.gitlab.com/handbook/values/).”\n\n![Breaking down the handbook](https://about.gitlab.com/images/blogimages/studyingthehandbook.jpg){: .shadow.medium.center}\nStudents in Song's class breaking down the handbook.\n{: .note.text-center}\n\nRemote work was also a big topic of discussion in Song's classroom.\n\n\"Many Koreans are interested in remote work,\" says Song. \"It is really great that people can work anywhere, anytime without having the stress of commuting. Remote work is not common in Korea yet. Only a few software developers are allowed to work from home but that is also partial and in a limited environment only. Many students also want to do the remote work but this is still kind of a dream.”\n\nSong is currently teaching a second GitLab-focused class, this time diving into project management and DevOps by looking at our product and Pivotal Labs. If there is one benefit Song thinks his students have taken away from studying GitLab it’s the importance of communication.\n\n“Communication between employees is one of the most important matters,\" says Song. 
\"By studying the GitLab handbook, my students and I learned an efficient way of communication between the employer and employees. The handbook explicitly shows how GitLab is trying to do the best way of communication between stakeholders; what is the company goal, why we established the goal and how we are achieving the goal.”\n\nSong hopes to inspire a future generation of entrepreneurs by studying the GitLab handbook in the classroom.\n\n“My students have studied the GitLab handbook for one semester. I hope this study can be their reference when they start their startup, so they can create their company goals and prototype in the direction of success, like GitLab.\"\n\n_If you’re interested in seeing more of Song’s curriculum, he shared it\n[here](https://docs.google.com/document/d/1u5J6Ypj6zwQJVjmrl1wd0eIv7Q_TYLJysDquhGMJimA/edit). You'll need to scroll down a bit._\n\nCover image by [Patrick Tomasso](https://unsplash.com/@impatrickt) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[813,267,9,682,3350,936],{"slug":4052,"featured":6,"template":686},"gitlab-taught-in-korean-uni","content:en-us:blog:gitlab-taught-in-korean-uni.yml","Gitlab Taught In Korean Uni","en-us/blog/gitlab-taught-in-korean-uni.yml","en-us/blog/gitlab-taught-in-korean-uni",{"_path":4058,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4059,"content":4065,"config":4071,"_id":4073,"_type":14,"title":4074,"_source":16,"_file":4075,"_stem":4076,"_extension":19},"/en-us/blog/gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources",{"title":4060,"description":4061,"ogTitle":4060,"ogDescription":4061,"noIndex":6,"ogImage":4062,"ogUrl":4063,"ogSiteName":670,"ogType":671,"canonicalUrls":4063,"schema":4064},"How to use Terratag to manage Terraform tags automatically","This blog addresses how you can do that easily and automatically when using Terraform and Terratag (an open source project by env0) on top of the Gitlab CI/CD 
platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682137/Blog/Hero%20Images/blog-image.png","https://about.gitlab.com/blog/gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Terratag to automatically manage tags and labels for your Terraform Code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2021-09-14\",\n      }",{"title":4066,"description":4061,"authors":4067,"heroImage":4062,"date":4068,"body":4069,"category":791,"tags":4070},"How to use Terratag to automatically manage tags and labels for your Terraform Code",[2120],"2021-09-14","\n\nWhen using infrastructure as code (IaC) on a public cloud provider, it's important to use tags and labels to organize your IaC using their complementary services. Terratag, an open source project developed by [env0](http://www.env0.com), can be used with Terraform and placed on top of the GitLab CI/CD platform, making tagging and labeling IaC easier and more efficient.\n\nGitLab and Terraform make it easy to tag and label infrastructure as code.\n\n## Inside your toolbox\n\n[GitLab](https://about.gitlab.com/) is the industry's leading DevOps platform. Not long ago, we announced the ability to control Terraform deployments, remote state management, private module registry, and merge request integration for Terraform. This gives users a range of solutions for running CI/CD for Terraform code and managing it on a large scale.\n\n[Terraform](https://www.terraform.io/) is the most widely adopted IaC framework out there. It's an open source project that is maintained by HashiCorp, and was launched in 2014 based on HashiCorp configuration language (HCL). 
Terraform is a command line (CLI) tool that can help manage and provision external resources such as public cloud infrastructure, private cloud infrastructure, network appliances, and SaaS and PaaS vendors. All major clouds are supported where AWS, Azure, and GCP have an official provider that are maintained internally by the HashiCorp Terraform team.\n\nAll major cloud providers support tagging/labeling for most of their resources using their Terraform provider, to help users manage infrastructure more efficiently. In this blog post, we provide some examples that show how it is easy to tag and label your IaC using Terratag with GitLab CI/CD – a core component of our DevOps platform.\n\n### How to automatically manage tags/labels for your Terraform Code\n\nFirst, we'll take a deep dive into the importance of tagging and labeling your IaC when using a public cloud provider. Next, we'll explain how to manage tags and labels for your IaC easily and automatically when using Terraform and [Terratag](https://terratag.io/) on top of the Gitlab CI/CD platform, with simple code examples for an end-to-end solution.\n\n### Why tags/labels are so important\n\nAll major cloud providers allow tagging (or labeling) cloud resources. Moreover, they encourage you to use tags or labels to do things like manage budgets, set up powerful automation algorithms, and unlock insights offered by the cloud providers and independent third parties.\n\nBy harnessing powerful IaC frameworks like Terraform, users can define and tag cloud resources for verticals ranging from the development to ops, as well as business needs.\n\n### The problem with tagging today\n\nTagging is a manual process, which can make it a real hassle, particularly as your infrastructure grows. Repeatedly tagging dozens or even hundreds of cloud resources is inefficient, but that's just the start of the problems. 
Manually tagging fails in other important ways too:\n\n* **Standards are hard to maintain if they're not enforced**: Your entire team needs to be on the same page – keeping an eye out for newly added cloud resources, making sure they include those tags or you may miss some significant resources when acting on that metadata later.\n\n* **Harder to change**: Applying changes to tag structure across the board quickly becomes unmanageable.\n\n* **Metadata can obscure what's important**: While tagging all this metadata is useful for slicing and dicing later, having it everywhere on your resources pollutes your IaC, making it more cumbersome and harder to maintain.\n\n* **Migration**: What if you already have plenty of Terraform modules with cloud resources, which weren't tagged to begin with? Trying to tag them all now can be painstaking work.\n\nAt the end of the day, IaC is, well, just code, and as is the case with any code, repetition makes it harder to fix errors, apply enhancements, make adjustments and maintain readability. As tagging is a cross-cutting concern, the lack of proper layering or aspect control makes it harder to retrofit existing solutions.\n\n### Terratag to the rescue\n\n[Terratag](https://terratag.io/) allows the user to automatically tag or label all the resources in their Terraform code. It also automatically tags all of your Terraform sub-modules, even if they don't expose tags as an input. Terratag is a CLI tool that works with all the major cloud providers including AWS, Google Cloud Platform, and Microsoft Azure, and solves the complicated problem of tagging resources across applications at scale. 
It eliminates the risk of human error, can retroactively tag IaC resources that were previously deployed, and helps you easily use the tags for various purposes, like cost management, organization, reporting, etc.\n\n### How to run Terraform with GitLab\n\nGitlab offers a wide range of tools for Terraform, starting with a [managed remote state](https://docs.gitlab.com/ee/user/infrastructure/terraform_state.html), running your deployment with [Gitlab CI/CD](https://docs.gitlab.com/ee/ci/), [Terraform private module registry](https://docs.gitlab.com/ee/user/packages/terraform_module_registry/index.html#publish-a-terraform-module-by-using-cicd) and [integration in Merge Requests (MRs)](https://docs.gitlab.com/ee/user/infrastructure/mr_integration.html) and getting Terraform plan output information into an MR.\n\nIn this tutorial, we use Gitlab CI/CD to deploy a Terraform repository into Google Cloud Platform and let Gitlab manage our remote state.\n\n### Combining Terraform with GitLab in GCP\n\nWe explain how to implement and combine Terraform and GitLab with ease, starting with building the deployment of our Terraform code using GitLab and then see the results in Google Cloud platform.\n\n### Terraform code with GitLab as a backend\n\nWe're using Terraform to deploy a simple VPC and a VM into GCP. We will use GitLab Terraform backend configuration, which is based on the Terraform [HTTP backend](https://www.terraform.io/docs/language/settings/backends/http.html). The beauty of this configuration is that you don't need to add any configuration regarding authentication when running it inside Gitlab CI/CD. GitLab will automatically set up all the relevant configuration for your backend according to the project it's running in.\n\nThe code is available in [the Terratag project created for this blog post](https://gitlab.com/env0/terratag-blog-post/-/tree/main).\n\n### Set up variables\n\nThis Terraform code needs some variables in order to run. 
We can set these up using Gitlab CI/CD variables. Under your Gitlab Project, go to Settings > CI/CD and expand the variable section. We will need to add three variables:\n\n* `GOOGLE_CREDENTIALS`: This variable value should be the JSON of your Google Cloud service account. [See this documentation](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) on how to create a service account key.\n\n* `TF_VAR_project_id`: Your Google Cloud project ID.\n\n* `TF_VAR_machine_type`: The VM type you would like to create.\n\n![tg_1](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_1.png)\n\n### Set up Gitlab CI/CD\n\nSetting up a Gitlab CI/CD for Terraform is really easy – all you need to do is add a simple file in your repository called `.gitlab-ci.yml` and add a configuration for each step of your Terraform deployment. We're going to add the following steps to our pipeline:\n\n* **Plan**: This step will run the `terraform init` and `terraform plan` commands and in the middle will also run Terratag to tag all the relevant resources. At the end it will also output the Terraform plan as a `JSON` file and create an artifact.\n\n* **Apply**: This step will run the `terraform apply` command. It depends on the plan to finish successfully. This step is done manually so we can check the plan before applying the changes.\n\n[https://gitlab.com/env0/terratag-blog-post/-/blob/main/.gitlab-ci.yml](https://gitlab.com/env0/terratag-blog-post/-/blob/main/.gitlab-ci.yml)\n\nSince Terratag scans the entire Terraform code, including any Terraform modules you may be using, we need to run the `terraform init` command before we run the Terratag command, since the init command will download all the relevant modules so Terratag can scan them.\n\nWe can see two resources in this code:\n\n* `google_compute_network`: This resource sets up the VPC. 
Terratag will not apply labels since the [compute network doesn't allow labels](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_network).\n\n* `google_compute_instance`: This resource sets up the VM. Terratag applies the label that the user defines.\n\nHere is the output of Terratag on this Terraform code:\n\n![tg_2](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_2.png)\n\nThis is what this pipeline will look like in the Gitlab UI. When the Terraform plan step is successfully completed, you can manually apply the changes after reviewing the plan, which is also available as an artifact – meaning it can be downloaded and viewed locally.\n\n![tg_3](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_3.png)\n\n### How to apply labels on GCP\n\nAs we mentioned before, labeling your resources has a lot of technical, operations, and business benefits. This blog post focuses on the cost benefit of effective labeling.\n\nFirst, let's see that the VM we've created is actually tagged correctly.\n\nStart by heading to the Google Cloud console. Next, go to the Compute Engine page and, under VM, search for the VM we've just created. Then, go into the VM Instance details page and see that the label exists with the right value.\n\n![tg_4](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_4.png)\n\nNext, go to the Billing section and select \"Reports\". On the right hand side of the page there are filters. Under labels, we can filter the label key and the label value and get the cost of those resources.\n\n![tg_5](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_5.png)\n\n### Automate labeling using Terratag\n\nTags and labels play a crucial role in managing large-scale infrastructure projects and offer significant benefits when using tools such as [Gitlab CI/CD](https://docs.gitlab.com/ee/ci/). 
[Terratag](https://www.terratag.io/) has the advantage of easing the transition for Terraform users. Adopting Terratag for use with GitLab CI/CD and Terraform will also help establish a standard in your organization when it comes to use of tags and labels, eliminating the need for human intervention on a large-scale project to change your current Terraform code base.\n\nFeel free to check out the [code base](https://gitlab.com/env0/terratag-blog-post) for this blog post and leave us feedback.\n\n_Blog post coauthor [Omry Hay](https://www.linkedin.com/in/omryhay/) is the co-founder and CTO of [env0](http://www.env0.com)_\n",[9,534],{"slug":4072,"featured":6,"template":686},"gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources","content:en-us:blog:gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources.yml","Gitlab Together With Terratag Open Source To Help You Manage Terraform Resources","en-us/blog/gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources.yml","en-us/blog/gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources",{"_path":4078,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4079,"content":4085,"config":4091,"_id":4093,"_type":14,"title":4094,"_source":16,"_file":4095,"_stem":4096,"_extension":19},"/en-us/blog/gitlab-top-devops-tooling-metrics-and-targets",{"title":4080,"description":4081,"ogTitle":4080,"ogDescription":4081,"noIndex":6,"ogImage":4082,"ogUrl":4083,"ogSiteName":670,"ogType":671,"canonicalUrls":4083,"schema":4084},"The top DevOps tooling metrics and targets at GitLab","Here is how we measure DevOps success and why we always try to look forward.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665635/Blog/Hero%20Images/blog-performance-metrics.jpg","https://about.gitlab.com/blog/gitlab-top-devops-tooling-metrics-and-targets","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": 
\"Article\",\n        \"headline\": \"The top DevOps tooling metrics and targets at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mek Stittri\"}],\n        \"datePublished\": \"2022-04-05\",\n      }",{"title":4080,"description":4081,"authors":4086,"heroImage":4082,"date":4088,"body":4089,"category":791,"tags":4090},[4087],"Mek Stittri","2022-04-05","\n\nA successful DevOps practice relies heavily on metrics. Here at GitLab, we use seven key DevOps metrics to measure engineering efficiency and productivity.  Like many teams, we use industry standard metrics, but in some cases, we approach this data with a unique GitLab point of view. Here’s the first in a multipart look at the DevOps metrics we at GitLab think are most critical for success. Compare your metrics and results with ours, and [let’s get a conversation started](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/13202).\n\n## Master pipeline stability\n\nIt’s important to be able to measure the stability of the GitLab project’s master branch pipeline. This metric tells us how stable the main branch is, and ensures engineers are checking out code that’s in good shape. [Merge trains](https://gitlab.com/gitlab-org/quality/team-tasks/-/issues/195) are key to this effort. \n\nOur target percentage for [master pipeline stability](/handbook/engineering/quality/performance-indicators/#master-pipeline-stability  ) is above 95%.\n\n![master pipeline stability](https://about.gitlab.com/images/blogimages/dometrics1.png)\n\n## Review app deployment success rate\n\nAt GitLab we take [review apps](https://docs.gitlab.com/ee/ci/review_apps/) seriously.  We measure their success rate so we can understand the stability of our first deployed environment after code change. Review apps are spun up at MR submission. It’s important to monitor our review app successful deployments because it’s the first place where code is integrated and deployed as one unit. 
This metric ensures the codebase can be installed, tested, and made available for the team to preview their changes before merging into the main master branch. \n\nOur target for [review application deployment success](/handbook/engineering/quality/performance-indicators/#review-app-deployment-success-rate) is above 99%. \n\n![review app deployment success](https://about.gitlab.com/images/blogimages/dometrics2.png)\n\n## Time to First Failure\n\nTime to First Failure (TtFF, pronounced as “teuf”) measures how fast we are providing feedback to engineers. This metric examines how long it takes from pipeline creation to the first actionable failed build. The idea is that if the commit is going to fail, it should fail fast and the fail signal should get to the engineers as quickly as possible. The shorter the time to first failure, the faster the feedback loop, and faster time to action to address those failures. \n\nOur [TtFF target](/handbook/engineering/quality/performance-indicators/#time-to-first-failure) is less than 15 minutes.\n\n![TtFF or Time to First Failure](https://about.gitlab.com/images/blogimages/dometrics3.png)\n\n## Open S1 bug age\n\nThis metric focuses on the age of open S1 bugs. Many organizations measure time to close bugs. At Gitlab we focus on the age of bugs remaining. We structure the metric to focus on work that is remaining and can be actioned on. If we only measure time to close of fixed defects, we may miss addressing older defects and unintentionally incentivize closing of only newer defects. We like to look forward by asking ourselves “What’s left?” and “What can be done now?” rather than only looking backward at what’s already been done.\n\nOur target for [S1 open bug age](/handbook/engineering/quality/performance-indicators/#s1-oba) is under 100 days.\n\n![Open S1 bug age](https://about.gitlab.com/images/blogimages/dometrics4.png)\n\n## Open S2 bug age\n\nThis metric is similar to the open S1 bug age, but is focused on S2 bugs. 
Again, we measure the age of remaining open bugs rather than focusing on bugs that have been closed.\n\nOur target for the [open S2 bug age](/handbook/engineering/quality/performance-indicators/#s2-oba) metric is below 300 days.\n\n![Open S2 bug age](https://about.gitlab.com/images/blogimages/dometrics5.png)\n\n## Merge request pipeline duration\n\nWhen a pipeline is started for a merge request, how long does it take to run? This metric focuses on the duration of merge request pipelines and its time efficiency.  Within the total duration we break the data down into multiple stages. The team then iterates and improves time efficiencies of each stage of the pipeline. This is a key building block for improving GitLab’s code cycle time and efficiency and ensures the code is merged in a timely manner.\n\nOur target for this metric is below 45 minutes.\n\n![MR pipeline duration](https://about.gitlab.com/images/blogimages/dometrics6.png)\n\n## MR pipeline costs\n\nWe use this metric at GitLab to help us determine our Merge Request Pipeline cost efficiency. We look at the total costs for the CI runners machines for MR pipelines. Once we’ve determined that figure, we divide it by the number of merge requests. This helps us monitor cost while fine-tuning efficiency. Speed and cost move in different directions. To help speed up you can increase resources, but it comes at a cost. Monitoring this metric enables us to be balanced and have a healthy trade-off between optimizing for cost and speed.\n\nOur target for the [MR pipeline costs](/handbook/engineering/quality/performance-indicators/#merge-requests-pipeline-cost) metric is below 7.50.\n\n![MR pipeline costs](https://about.gitlab.com/images/blogimages/dometrics7.png)\n\n## What DevOps tooling metrics are most effective for your team?\n\nWe’d like to hear what you think of our choices, and our targets, and what works, or doesn’t, for you. 
[Chime in here](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/13202).\n",[9,728,1158],{"slug":4092,"featured":6,"template":686},"gitlab-top-devops-tooling-metrics-and-targets","content:en-us:blog:gitlab-top-devops-tooling-metrics-and-targets.yml","Gitlab Top Devops Tooling Metrics And Targets","en-us/blog/gitlab-top-devops-tooling-metrics-and-targets.yml","en-us/blog/gitlab-top-devops-tooling-metrics-and-targets",{"_path":4098,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4099,"content":4105,"config":4111,"_id":4113,"_type":14,"title":4114,"_source":16,"_file":4115,"_stem":4116,"_extension":19},"/en-us/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation",{"title":4100,"description":4101,"ogTitle":4100,"ogDescription":4101,"noIndex":6,"ogImage":4102,"ogUrl":4103,"ogSiteName":670,"ogType":671,"canonicalUrls":4103,"schema":4104},"GitLab uses Anthropic for smart, safe AI-assisted code generation","Anthropic’s Claude AI model supports the delivery of helpful, trusted code in GitLab Duo Code Suggestions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669095/Blog/Hero%20Images/gitlabduo.png","https://about.gitlab.com/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab uses Anthropic for smart, safe AI-assisted code generation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kevin Chu\"}],\n        \"datePublished\": \"2024-01-16\",\n      }",{"title":4100,"description":4101,"authors":4106,"heroImage":4102,"date":4108,"body":4109,"category":1178,"tags":4110},[4107],"Kevin Chu","2024-01-16","GitLab recently launched GitLab Duo Code Suggestions into general availability. 
Code Suggestions includes the ability to generate algorithms or code blocks directly within the developer's IDE, a capability that uses [Anthropic's](https://www.anthropic.com/) generative AI model, [Claude](https://www.anthropic.com/index/claude-2-1). Integrated into the GitLab Duo portfolio of AI-assisted features, Claude is compatible with GitLab’s principles of [transparency and privacy](https://about.gitlab.com/blog/seven-questions-to-ask-your-devops-provider/) by design and provides a high-integrity foundation for code generation.\n\nIn this post, you'll learn the advantages of code generation and how GitLab, together with Anthropic, is leveraging AI to responsibly boost developer productivity.\n\n## How AI-assisted code generation works\n\nCode Suggestions is incredibly useful as a coding companion that shows the suggestions as a developer types. It helps save developer time and keystrokes, reducing the effort for rote tasks and giving developers time back in their day. But what if a developer wants to do even more with generative AI?\n\nEnter code generation.\n\nImagine needing to write a new complex function based on an unfamiliar algorithm, or write a large amount of boilerplate code. Instead of struggling through these tasks with gritted teeth, code generation allows developers to simply define what they want to do in comments or multi-line comment blocks, and then Code Suggestions generates the code from there. \n\nHere is an example of Code Suggestions generating a JavaScript function that calculates the Levenshtein distance, a string metric useful for comparing the difference between two sequences:\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175962/Blog/lkrk16unp4dcy3c4zwvw.gif\" alt=\"Code Suggestions generating JavaScript function\" width=\"100%\" height=\"auto\">\n\nHere is another example showing a multi-line comment in Python. 
We want Code Suggestions to generate a Tornado Web Server that does three things: log in, run a scan, and review the results. By providing the specific instructions, including details such as the framework and the components to use, Code Suggestions was able to generate a Tornado App, despite this author being unfamiliar with Tornado. \n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175967/Blog/pxcdppnpzwfhgopxh999.gif\" alt=\"Code Suggestions generating Tornado app\" width=\"100%\" height=\"auto\">\n\n## Safety through focus and trustworthiness\n\nDevelopers expect AI coding assistants to not only be helpful, but also accurate and safe. The system should generate precisely what is asked for while limiting deviation and [hallucination](https://www.ibm.com/topics/ai-hallucinations). Customers want assurances that AI-generated code can be trusted.\n\nThroughout GitLab's evaluation of certain code generation models, Claude stood out for its ability to mitigate distracting, unsafe, or deceptive behaviors. Claude also demonstrated consistent and accurate code generation throughout our testing.\n\nGitLab's use of Anthropic's Claude enables Code Suggestions to balance automation with trust. Code Suggestions helps users become more efficient without sacrificing reliability — a win for augmented development.\n\n## What’s next\n\nReady to experience the future of code generation? 
Start your [free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/) today and unlock the power of AI-assisted development!",[1181,231,2243,9],{"slug":4112,"featured":91,"template":686},"gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation","content:en-us:blog:gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation.yml","Gitlab Uses Anthropic For Smart Safe Ai Assisted Code Generation","en-us/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation.yml","en-us/blog/gitlab-uses-anthropic-for-smart-safe-ai-assisted-code-generation",{"_path":4118,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4119,"content":4125,"config":4130,"_id":4132,"_type":14,"title":4133,"_source":16,"_file":4134,"_stem":4135,"_extension":19},"/en-us/blog/gitlab-value-stream-analytics",{"title":4120,"description":4121,"ogTitle":4120,"ogDescription":4121,"noIndex":6,"ogImage":4122,"ogUrl":4123,"ogSiteName":670,"ogType":671,"canonicalUrls":4123,"schema":4124},"The role of Value Stream Analytics in GitLab's DevOps Platform","Better DevOps teams start with value stream management. Here's how to get the most out of GitLab's Value Stream Analytics.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668041/Blog/Hero%20Images/Understand-Highly-Technical-Spaces.jpg","https://about.gitlab.com/blog/gitlab-value-stream-analytics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The role of Value Stream Analytics in GitLab's DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2022-01-24\",\n      }",{"title":4120,"description":4121,"authors":4126,"heroImage":4122,"date":4127,"body":4128,"category":769,"tags":4129},[2120],"2022-01-24","\n\n***\"Whenever there is a product for a customer, there is a value stream. 
The challenge lies in seeing it!\"*** *Learning to See - Shook & Rother*\n\nEvery company today is a software company so the level of innovation and delivery has a direct impact on revenue generation. In order to be successful, businesses must deliver an amazing digital experience, keep up with the latest technologies, deliver value at the speed demanded by customers, and do it all with zero tolerance for outages or security breaches. That's where value stream management comes into play.\n\n*“If you can’t describe what you are doing as a value stream, you don’t know what you’re doing.”* *(Martin, K. & Osterling, M. (2014). Value Stream Mapping. McGraw-Hill, p. 15.)*\n\nValue stream management (VSM) is a change in development mindset that puts the customer at the center. VSM allows teams to measure and improve the software delivery and value flow to customers. The development process is outlined from ideation until customer value realization. The focus is no longer on features and functionality – instead, organizations ensure the efforts and resources invested to deliver value to customers will improve flows that are causing bottlenecks, optimizing the cycle and shortening time to market. \n\nYou can learn more about [Value Stream Mapping](/topics/devops/value-stream-mapping/) here.\n\n## An overview of GitLab's Value Stream Analytics \n\nAs part of [GitLab's DevOps Platform](/solutions/devops-platform/), Value Stream Analytics provides one shared view of the team's velocity. With insights into how long it takes the team to move from planning to monitoring, it's possible to pinpoint areas for improvement. Value Stream Analytics measures the time spent for each project or group. It displays the median time spent in each stage of the process by measuring from its start event to its end event. 
It helps identify bottlenecks in the development process, enabling management to uncover, triage, and identify the root cause of slowdowns in the software development life cycle and to quickly act on them to improve efficiency.\n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_1.png)\n\n## Why are Value Stream Analytics important? \n\nThe process of efficient software delivery starts by understanding where the slowest parts are, and what are the root causes behind them. With this information it's possible to build a plan for optimization.  \n\n## Which DevOps stages are tracked? \n\nThe stages tracked by Value Stream Analytics by default represent GitLab's DevOps Platform flow - \n**Issue**, **Plan**, **Code**, **Test**, **Review** and **Staging**.  \n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_stages.png)\n\n## How to customize GitLab's Value Stream Analytics \n\nNote: The stages can be customized in group level Value Stream Analytics; currently no customization is available in the project level. \n\nClick Edit in the Value Stream Management \n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_4.png)\n\nClick Add another stage \n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_5.png)\n\nDefine stage name, and select start event and end event from the list. \n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_6.png)\n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_7.png)\n\n## The key metrics \n\nThe dashboard includes useful key metrics which help to understand the team performance. If, for example, the values of **new issues**, **commits** and **deploys** are high, it's clear a team is productive. The dashboard also includes the DevOps metrics commonly known as the **DORA (DevOps Research and Assessment) 4**. 
The [DORA 4 metrics](https://cloud.google.com/blog/products/devops-sre/using-the-four-keys-to-measure-your-devops-performance) show the value the team delivered to customers.\n\n**Deployment Frequency** shows how often code is deployed to production and brings value to end users. **Lead time for changes** measures how long it takes a change to get into production. Like deployment frequency, this metric measures team velocity.\n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_metrics.png)\n\n## The importance of Value Stream Analytics within GitLab\n\nGitLab is a complete DevOps Platform, delivered as a single application. As such, teams use the same application during the development process from planning to monitoring. One of the benefits of being a single application for the entire DevOps lifecycle is that the data flows from all DevOps stages and is available for analysis, so Value Stream Analytics correlates and identifies how teams are spending their time without the need to integrate with an external tool. 
\n\nLearn more about [Value Stream Analytics for projects](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html) and [Value Stream Analytics for groups](https://docs.gitlab.com/ee/user/group/value_stream_analytics/).\n\nTake a deeper dive into what DORA calls [elite DevOps teams](/blog/how-to-make-your-devops-team-elite-performers/).\n\n\n\n\n\n\n\n\n\n\n",[9,1040,683],{"slug":4131,"featured":6,"template":686},"gitlab-value-stream-analytics","content:en-us:blog:gitlab-value-stream-analytics.yml","Gitlab Value Stream Analytics","en-us/blog/gitlab-value-stream-analytics.yml","en-us/blog/gitlab-value-stream-analytics",{"_path":4137,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4138,"content":4143,"config":4150,"_id":4152,"_type":14,"title":4153,"_source":16,"_file":4154,"_stem":4155,"_extension":19},"/en-us/blog/gitlab-value-stream-management-and-dora",{"title":4139,"description":4140,"ogTitle":4139,"ogDescription":4140,"noIndex":6,"ogImage":1449,"ogUrl":4141,"ogSiteName":670,"ogType":671,"canonicalUrls":4141,"schema":4142},"Improving visibility: GitLab's value stream and DORA metrics","Optimize DevOps with the new DORA metrics in GitLab Value Stream Management.","https://about.gitlab.com/blog/gitlab-value-stream-management-and-dora","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Break the black box of software delivery with GitLab Value Stream Management and DORA Metrics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2022-06-20\",\n      }",{"title":4144,"description":4140,"authors":4145,"heroImage":1449,"date":4147,"body":4148,"category":769,"tags":4149},"Break the black box of software delivery with GitLab Value Stream Management and DORA Metrics",[4146],"Haim Snir","2022-06-20","\n\nOur customers frequently tell us that despite being very effective DevOps practitioners, they still struggle to build a 
data-driven DevOps culture. They find it especially hard to answer the fundamental question:\n\n_What are the right things to measure?_\n\nThis becomes more challenging in enterprise organizations when there are hundreds of different development groups, and there's no normalization between how things are done or measured. Because of this, we see a strong interest from customers for metrics that would allow them to standardize between teams and benchmark themselves against the industry.\n\n![Value Streams Analytics helps you visualize and manage the DevOps flow from ideation to customer delivery.](https://about.gitlab.com/images/blogimages/2022-06-dora-vsa-overview.png){: .shadow}\nValue Streams Analytics helps you visualize and manage the DevOps flow from ideation to customer delivery.\n{: .note.text-center}\n\n## What Are DORA Metrics? \n\nWith the continued acceleration of digital transformation, most organizations realize that technology delivery excellence is a must for long-term success and competitive advantage. After seven years of data collection and research, the [DORA's State of DevOps research program](https://www.devops-research.com/research.html) has developed and validated four metrics that measure software delivery performance: [(1) deployment frequency, (2) lead time for changes, (3) time to restore service and (4) change failure rate.](https://docs.gitlab.com/ee/user/analytics/#devops-research-and-assessment-dora-key-metrics) \n\nIn GitLab, The One DevOps Platform, [Value Stream Analytics (VSA)](/solutions/value-stream-management/) surfaces a single source of insight for each stage of the software development process. 
The analytics are available out of the box for teams to drive performance improvements.\n\n## What does DORA bring to Value Stream Analytics?\n\nValue Stream Analytics (VSA) measures [the entire journey from customer request to release](https://docs.gitlab.com/ee/user/group/value_stream_analytics/) and automatically displays the overall performance of the stream. Each stage in the value stream is transparent and compliant in a shared experience for everyone in the company. \n\nThis makes the VSA the single source of truth (SSoT) about what's happening within the entire software supply chain, with DORA’s metrics as the key measure of the value stream outputs. \n\n## How do Value Stream Analytics work?\n\nValue stream analytics measures the median time spent by issues or merge requests in each development stage.\n\nAs an example, a stage might begin with the addition of a label to an issue and end with the addition of another label:\n\n![Value stream analytics measures each stage from its start event to its end event.](https://about.gitlab.com/images/blogimages/2022-06-dora-vsa-stage.png){: .shadow}\nValue stream analytics measures each stage from its start event to its end event.\n{: .note.text-center}\n\nFor each stage, a table list displays the workflow items filtered in the context of that stage. 
[In stages based on labels](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#label-based-stages-for-custom-value-streams), the table will list Issues, and in stages based on Commits, it will list MRs:\n\n![The VSA MR table provides a deeper insight into stage time breakdown .](https://about.gitlab.com/images/blogimages/2022-06-dora-vsa-mr.png){: .shadow}\nThe VSA MR table provides a deeper insight into stage time breakdown.\n{: .note.text-center}\n\nThe tables provide a deep dive into the stage performance and allow users to answer questions such as:\n\n- How to easily see bottlenecks that are slowing down the delivery of value to customers?\n- How to reduce the time spent in each stage so I can deliver features faster and stay competitive? \n- How can we develop code faster?\n- How can we hand off to QA faster?  How can we push changes to Production more quickly?\n\nUsing the Filter results text box, you can filter by a project (example below) or parameter (e.g., Milestone, Label). \n\n![Value stream analytics filtering.](https://about.gitlab.com/images/blogimages/2022-06-dora-vsa-filter.png){: .shadow}\nValue stream analytics filtering.\n{: .note.text-center}\n\nNo login is required to view [Value stream analytics for projects](https://gitlab.com/gitlab-org/gitlab/-/value_stream_analytics) where you can become familiar with stream filtering, default stages and deep-dive tables. For a full view of the DORA metrics, you have to log in with your GitLab [Ultimate-tier](https://about.gitlab.com/pricing/) account or sign up for a [free trial](https://about.gitlab.com/free-trial/).\n\n## How to understand DevOps maturity and benchmark progress with the DORA metrics?\n\nDORA metrics can also provide answers to questions not related to VSA, such as:\n\n- How to become an elite team of DevOps professionals?\n- How do I perform vs. industry standards? 
\n- Is the organization better at DevOps this year than last?\n\n## Learn more about VSA and DORA:\n\n- Check out the GitLab Speed Run about DORA metrics in VSA:\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/wQU-mWvNSiI\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n- [GitLab DORA metrics API documentation](https://docs.gitlab.com/ee/api/dora/metrics.html)\n\n- [Step-by-step instructions for creating a custom value stream](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#create-a-value-stream-with-gitlab-default-stages)\n",[855,9,916,1040,683],{"slug":4151,"featured":6,"template":686},"gitlab-value-stream-management-and-dora","content:en-us:blog:gitlab-value-stream-management-and-dora.yml","Gitlab Value Stream Management And Dora","en-us/blog/gitlab-value-stream-management-and-dora.yml","en-us/blog/gitlab-value-stream-management-and-dora",{"_path":4157,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4158,"content":4164,"config":4169,"_id":4171,"_type":14,"title":4172,"_source":16,"_file":4173,"_stem":4174,"_extension":19},"/en-us/blog/gitlabs-2021-survey-uncovers-a-new-devops-maturity-model",{"title":4159,"description":4160,"ogTitle":4159,"ogDescription":4160,"noIndex":6,"ogImage":4161,"ogUrl":4162,"ogSiteName":670,"ogType":671,"canonicalUrls":4162,"schema":4163},"GitLab's 2021 Survey uncovers a new DevOps maturity model","Our 2021 Global DevSecOps Survey found dramatic advances in DevOps maturity including faster release/deployment cycles, increased automation and improved security postures.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664041/Blog/Hero%20Images/open-devops.png","https://about.gitlab.com/blog/gitlabs-2021-survey-uncovers-a-new-devops-maturity-model","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's 
2021 Survey uncovers a new DevOps maturity model\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-05-04\",\n      }",{"title":4159,"description":4160,"authors":4165,"heroImage":4161,"date":4166,"body":4167,"category":679,"tags":4168},[851],"2021-05-04","\n_Our 2022 GitLab DevSecOps Survey has the latest insights from over 5,000 DevOps professionals. Download and [read the full survey](/developer-survey/)._\n\nIn the midst of a global pandemic and a new way of working, teams got serious about what matters most, creating what amounts to a new [DevOps maturity model](/stages-devops-lifecycle/). GitLab’s  just-released 2021 Global DevSecOps Survey found sharp increases in automation, release cadences, continuous deployments, and security postures, as well as a growing reliance on cutting edge technologies, including artificial intelligence and machine learning. Nearly 4300 people shared their struggles and successes, and demonstrated a commitment to DevOps maturity like we’ve never seen before.\n\nWhat does this new DevOps maturity model look like? Well for one thing, it looks like it’s working. We think the year over year growth statistics speak for themselves:\n\n* 60% of developers are releasing code 2x faster than before, thanks to DevOps – up 25% from (pre-pandemic) 2020.\n* 72% of security pros rated their organizations’ security efforts as “good” or “strong” – up 13% over 2020.\n* 56% of ops teams members said they are “fully” or mostly automated – up 10% from 2020.\n* Almost 25% of respondents claimed to have full test automation – up 13% from 2020.\n* 75% of teams are either using AI/ML or bots for test/code review, or they’re planning to – up 41% from 2020.\n* Last year dev, sec, and ops said they needed [better communication and collaboration skills](/blog/collaboration-communication-best-practices/) for their future careers. 
This year, after an intense period of enforced soft skills, their priorities have shifted dramatically to AI/ML (devs), subject matter expertise (sec), and advanced programming (ops). \n\n## A 2021 DevOps maturity model\n\nAs we found in last year’s survey, [DevOps roles continue to change](/blog/software-developer-changing-role/), with developers taking on tasks usually associated with test and ops, ops focusing on the cloud and infrastructure, and security continuing to be part of cross-functional teams. The evolving nature of DevOps is hardly surprising: Fully 43% of our survey respondents have been doing DevOps for between three and five years - that’s the sweet spot where they’ve known success and are well-seasoned. But that “sweet spot” didn’t keep them complacent. This was also the year where practitioners skipped incremental improvements and reached for the big guns: SCM, CI/CD, test automation, and a [DevOps platform](/solutions/devops-platform/) were the most popular additions to their DevOps practices. \n\nWhy do teams strive for a DevOps maturity model? Code quality, faster time to market and improved security were the top three reasons.\n\nTesting remains the DevOps problem child – for the third year in a row participants said test is the most likely reason for release delays. There is some light at the end of the tunnel, though: not only has the percentage of teams with full test automation more than doubled year over year, a growing number of teams are either already using or plan to use AI/ML. Industry experts believe [AI/ML could revolutionize software testing](\u003Chttps://insidebigdata.com/2021/01/27/how-ai-and-machine-learning-will-shape-software-testing/>), and our survey participants apparently agree. \n\nAlso feeling the love in the survey were advanced technologies like Kubernetes. 
In our [2020 survey](/blog/devsecops-survey-released/), only 38% of survey takers used Kubernetes; this year the percentage jumped to 46% and even participants not using K8s currently said they planned to soon.\n\n## Looking to the future\n\nLast year our survey takers planned to focus on basics like automation, CI/CD and overall DevOps. But it’s 2021 now, and those efforts toward a new DevOps maturity model have paid off. This year participants plan to invest in the cloud, followed by [artificial intelligence](/blog/ai-in-software-development/). Last year, AI rated only a very distant 8th place. \n\nOur 2022 GitLab DevSecOps Survey has the latest insights from over 5,000 DevOps professionals. Download and [read the full survey](/developer-survey/).\n",[681,9,875],{"slug":4170,"featured":6,"template":686},"gitlabs-2021-survey-uncovers-a-new-devops-maturity-model","content:en-us:blog:gitlabs-2021-survey-uncovers-a-new-devops-maturity-model.yml","Gitlabs 2021 Survey Uncovers A New Devops Maturity Model","en-us/blog/gitlabs-2021-survey-uncovers-a-new-devops-maturity-model.yml","en-us/blog/gitlabs-2021-survey-uncovers-a-new-devops-maturity-model",{"_path":4176,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4177,"content":4183,"config":4189,"_id":4191,"_type":14,"title":4192,"_source":16,"_file":4193,"_stem":4194,"_extension":19},"/en-us/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment",{"title":4178,"description":4179,"ogTitle":4178,"ogDescription":4179,"noIndex":6,"ogImage":4180,"ogUrl":4181,"ogSiteName":670,"ogType":671,"canonicalUrls":4181,"schema":4182},"DevSecOps Survey 2022: Security leads concern and investment","Find out if your successes and concerns about security and more match those of your 
peers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663982/Blog/Hero%20Images/2022-devsecops-survey-blog-header.png","https://about.gitlab.com/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's 2022 Global DevSecOps Survey: Security is the top concern, investment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-08-23\",\n      }",{"title":4184,"description":4179,"authors":4185,"heroImage":4180,"date":4186,"body":4187,"category":769,"tags":4188},"GitLab's 2022 Global DevSecOps Survey: Security is the top concern, investment",[851],"2022-08-23","\nThe days of security as a “nice to have” are officially over as we enter the era of [DevSecOps](/topics/devsecops/). In our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) of more than 5,000 practitioners, security was the driving force behind technology choices, team structure, DevOps platform use, and more. \n\nThe findings from our [sixth annual survey](/developer-survey/) represent a dramatic shift from past years, when security teams – and security concerns – were often siloed and silenced in the push to get software out the door faster.\n\nNothing could be further from the truth today:\n\n- The number one reason to implement a DevOps platform? Security. (And 75% of DevOps teams use a [DevOps platform](/topics/devops-platform/) currently or plan to this year.)\n\n- The number one benefit of a DevOps platform? Security.\n\n- The number one investment priority for 2022? Security.\n\nThe attention to security in DevOps teams doesn’t stop there. 
As our surveys have shown since 2020, [DevOps roles continue to shift](/blog/software-developer-changing-role/), and this year, many of those shifts were laser-focused on security.\n\n- 53% of developers told us they’re “fully responsible” for security in their organizations, a 14 point increase from 2021.\n\n- Over one-third of security pros report being “hands on” and involved on a daily basis with dev and ops, an 11% increase from last year (and a massive cultural shift from groups not always known to get along).\n\n- Almost 50% of ops pros say they’re fully responsible for security in their organizations, up 20% from last year. \n\nAnd when we asked developers about the most difficult parts of their jobs, thousands pointed to security and security-related concerns. Three developers summed it up:\n\n_“Cyber security attacks are the biggest concerns facing us today.”_\n\n_“Data security, data security, I repeat, data security.”_\n\n_“Trying to build applications that are secure and stable.”_\n\n## More work to do\n\nSecurity clearly has a seat at the DevOps table today, but areas of friction remain. \n\nFor starters, security testing requires a balance that’s difficult to achieve. Static application security testing [(SAST)](/direction/secure/static-analysis/sast/), dynamic application security testing [(DAST)](/direction/secure/dynamic-analysis/dast/), and container and dependency scans are increasing, which is good news, but the percentage of devs able to easily access those results in their workflows remains stubbornly low (30% or less). \n\nAnd sec and dev [may never see eye to eye](/blog/developer-security-divide/) on finding and fixing bugs. For the third year in a row, sec pros said devs don’t find enough bugs early enough in the process, meaning they are stuck finding and fixing them much later (when it’s more difficult). 
And, as we’ve heard repeatedly over the last years, security’s focus and development’s focus aren’t usually the same: \n\n**57% of sec pros said finding bugs was a developer performance metric in their organizations, but 56% said it was difficult to get developers to actually prioritize bug remediation.**\n\n## Facing the future\n\nWhile security pros feel good about their organizations’ security postures (71% rated them as “good” or “very good”), they’re not feeling particularly optimistic about the future. A full 43% said they feel “somewhat” or “very” unprepared for the future; to look at it from another way, the percentage of sec pros who are confident, 56%, is 20 points *lower* than either their ops or dev colleagues.\n\nWhat can help power security professionals into the future? Surprisingly, the top answer (54%) is AI, which was a 33% increase from last year. Since 2020, sec respondents have said soft skills like communication and collaboration were most important but this year soft skills came in second place.\n\nSecurity is just one of many themes – automation, AI, information overload, real world challenges, compliance, and faster releases, to name just a few – our survey uncovered. 
So download and share the entire report, [“The 2022 DevSecOps Survey: Thriving in an Insecure World”](/developer-survey/), to dig deeper into them.\n\n## Read the previous surveys!\n\n[GitLab 2021 DevSecOps Survey](/developer-survey/previous/2021)\n\n[GitLab 2020 Global Developer Report: DevSecOps](/developer-survey/previous/2020/)\n\n[GitLab 2019 Global Developer Report: DevSecOps](/developer-survey/previous/2019/)\n",[681,9,875],{"slug":4190,"featured":6,"template":686},"gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment","content:en-us:blog:gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment.yml","Gitlabs 2022 Global Devsecops Survey Security Is The Top Concern Investment","en-us/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment.yml","en-us/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment",{"_path":4196,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4197,"content":4202,"config":4208,"_id":4210,"_type":14,"title":4211,"_source":16,"_file":4212,"_stem":4213,"_extension":19},"/en-us/blog/gitlabs-newest-continuous-compliance-features-bolster-software",{"title":4198,"description":4199,"ogTitle":4198,"ogDescription":4199,"noIndex":6,"ogImage":2756,"ogUrl":4200,"ogSiteName":670,"ogType":671,"canonicalUrls":4200,"schema":4201},"GitLab strengthens supply chain with compliance features","Business leaders and DevOps teams can continuously mitigate the risk of cloud-native environments and use guard rails to automate software compliance.","https://about.gitlab.com/blog/gitlabs-newest-continuous-compliance-features-bolster-software","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s newest continuous compliance features bolster software supply chain security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": 
\"2022-02-09\",\n      }",{"title":4203,"description":4199,"authors":4204,"heroImage":2756,"date":4205,"body":4206,"category":875,"tags":4207},"GitLab’s newest continuous compliance features bolster software supply chain security",[1921],"2022-02-09","\n_This blog post contains information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only._\n\n_Please do not rely on this information for purchasing or planning purposes._\n\n_As with all projects, the items mentioned in the blog post and linked pages are subject to change or delay. The development, release, and timing of products, features, or functionality remain at the sole discretion of GitLab, Inc._\n\nCompliance and risk management have become the responsibility of everyone in an organization, and DevOps is no exception. To ensure the greatest level of security with the least exposure, business leaders must be able to trust that when they adopt or create compliance frameworks and policies, the associated rules will be able to be automatically deployed and enforced throughout the software development lifecycle. GitLab’s newest functionality and our near-term roadmap will help companies shift compliance left just as they have done for security, and also simplify governance and risk management across the entire software lifecycle.\n\n## Software supply chain risks\n\nHigh-profile attacks on software supply chains, and the resulting demand for tighter controls in software development and deployment by the U.S. government and customers worldwide, have put compliance and risk management front and center. Companies are not only struggling to protect their traditional architecture, but cloud-native transformation has introduced new attack surfaces that require [DevSecOps](/topics/devsecops/) teams to secure more than just the code. 
Containers, orchestrators, microservices, and the cloud environment as a whole make the job of identifying and mitigating vulnerabilities and risks even more challenging.\n\nTraditional application security is [no longer enough](/blog/are-you-ready-for-the-newest-era-of-devsecops/) in the era of DevOps automation and growth of cloud-native applications. In addition to testing and monitoring the new attack surfaces, complicated toolchains full of disparate products make it difficult to gain the visibility necessary to meet compliance demands and manage risk.\n\nAt GitLab, we remain focused on innovating an end-to-end DevOps Platform that organizations can leverage to simplify all aspects of security, compliance, governance, and risk management – no matter if you are developing software in a traditional environment, a cloud-native workspace, or a hybrid of the two.\n\nSecurity and compliance remain key focuses for our product investment. Let’s take a quick look at recent innovations along with what’s coming in the near-term within the three themes of:\n\n- Enabling secure cloud-native development\n- Security governance\n- Leveraging the DevOps Platform for better security and compliance\n\nAll of the information from these additional scans is available within existing workflows so DevSecOps teams can get the actionable insight they need to quickly find and fix issues from within the continuous integration (CI) pipeline. Here is how it looks for the developer:\n\n![WIP: Feature branch](https://about.gitlab.com/images/blogimages/cindyfeaturebranch.png){: .shadow}\n\nAt the same time, security pros get early insight into risks as vulnerabilities are merged into feature branches (pre-production). The [vulnerability report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/) helps review and triage of vulnerabilities not resolved by the developer. 
This information is available at the project and group levels.\n\n![Vulnerability report](https://about.gitlab.com/images/blogimages/cindyvulnerabilityreport.png){: .shadow}\n\nThese capabilities are part of the existing GitLab Ultimate tier – no integrations or added costs required.\n\n## Enabling secure cloud-native development\n\nHere’s **what’s new** in GitLab to help DevSecOps secure cloud-native development:\n\n**Infrastructure as code scanning** – Many DevSecOps teams have started to implement [IaC](/direction/delivery/infrastructure_as_code/) as part of their software development lifecycle, so GitLab has introduced robust scanning tools that can analyze the IaC configuration files (i.e., YAML, Kubernetes, CloudFormation, Terraform) to identify common security issues of these new attack surfaces.\n\n**More flexible container scanning** – While we already had container scanning available in GitLab, we have switched to [Trivy open-source container vulnerability scanner technology](/releases/2021/06/22/gitlab-14-0-released/#container-scanning-integration-with-trivy) for pre-production environments. Trivy covers more languages and has better results than previous scanners. We also are beta-testing container scanning for production environments and [cluster image scanning](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html).\n\n**API security** – APIs represent a tremendous attack surface when not properly secured. We are using the state-of-the-art fuzzing technology [acquired from Peach Tech and Fuzzit](/press/releases/2020-06-11-gitlab-acquires-peach-tech-and-fuzzit-to-expand-devsecops-offering.html) to test APIs. 
In addition, our [dynamic application security testing for APIs](https://docs.gitlab.com/ee/user/application_security/dast_api/) (DAST) is in beta.\n\nResults from all of the scanners (IaC, containers and APIs) are incorporated into GitLab’s CI pipeline alongside other scan results enabling correction before configuration errors manifest in production.\n\nHere’s **what’s next** that will help DevSecOps secure cloud-native development:\n\n**Production container scanning** – We plan to make production container scanning generally available to scan containers for vulnerabilities after they’ve [already been deployed](/direction/secure/composition-analysis/container-scanning/). This will help surface vulnerabilities from new exploits not tested for during development.\n\n**DAST API scanner** – We will be making our [DAST API scanner](/direction/secure/dynamic-analysis/api-security/#whats-next--why)  generally available to enable broader coverage, better quality, and easier configuration. This will help you apply even greater defense-in-depth.\n\n**API Discovery** – DevSecOps teams will be able to leverage access to code to automatically [discover and test the APIs](https://gitlab.com/gitlab-org/gitlab/-/issues/38384)  being used throughout the organization’s software supply chain. Understanding the attack surface is important to protecting it.\n\n## Security governance\n\nHere’s **what’s new** to help organizations establish and manage security and compliance guardrails that allow developers to run fast while also managing risk:\n\n**Continuous compliance** – Organizations can shift compliance left, similar to security, to identify and mitigate violations early on to avoid delays at go-live. Compliant workflow automation enables a DevOps admin to assign a compliance framework to a project and enforce scans and other common controls across all project pipelines. 
Developers may not easily sidestep required controls.\n\n**Policy Engine** – GitLab automates a comprehensive set of security and compliance scans within the CI pipeline. Automating what happens when exceptions are encountered has been fairly simplistic. Now, GitLab provides users with a [policy editor](https://docs.gitlab.com/ee/user/application_security/policies/#policy-editor) that provides more fine-grained rules that can determine what approvals are required helping you manage your own unique appetite for risk.\n\nThe policy engine is part of a larger direction for [Security Orchestration](/direction/govern/security_policies/security_policy_management/) that includes continued iteration on Security Alert Management, Security Policy Management, and Security Approvals.\n\nHere’s **what’s next** that will help organizations establish and manage security governance:\n\n**Compliance checks in MRs** – GitLab is further automating continuous [compliance checks into the developer’s daily workflow](https://docs.gitlab.com/ee/user/compliance/compliance_report/index.html#approval-status-and-separation-of-duties) in a similar way as security scans. This will help compliance essentially shift left so developers can find and fix compliance violations early and stay on schedule.\n\n**Governance at the group level** – We are working to bring the controls found at the project level up to the group level so that policies may be more easily applied across a broad set of projects. 
This project is tied to the completion of workspaces.\n\n## The benefits of a single DevOps Platform\n\nHere’s **what’s new** that enables you to leverage the benefits of a single DevOps Platform in GitLab’s Ultimate version:\n\n**Unified vulnerability management and reporting** – We’ve consolidated security findings into a [single dashboard](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/) that aggregates information from GitLab and other sources, including third-party scanners, our [security partners](/partners/technology-partners/#security), and more. You can [pull in vulnerability data from other systems](/blog/three-things-you-might-not-know-about-gitlab-security/), manual pen testing, bug bounty programs, or even from security tools that don’t run in GitLab pipeline jobs. Vulnerability management in GitLab Ultimate helps you manage all of your [software vulnerability information](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/) in one place to efficiently triage and remediate findings.\n\n**Proprietary SAST scanner** – We have [replaced some of our language-specific open-source scanners (OSS)](https://docs.gitlab.com/ee/user/application_security/sast/#supported-languages-and-frameworks) with [Semgrep](https://r2c.dev/blog/2021/introducing-semgrep-for-gitlab/), a proprietary scanner, to improve coverage, accuracy, and speed. Semgrep's flexible rule syntax is ideal for streamlining the [GitLab Custom Rulesets](https://docs.gitlab.com/ee/user/application_security/sast/#customize-rulesets) feature for extending and modifying detection rules. 
It also allows GitLab customers access to Semgrep's community rules.\n\nHere’s **what’s next** that will enable organizations to leverage the benefits of a single DevOps Platform in GitLab’s Ultimate version:\n\n**Software supply chain security** – Organizations will be able to secure the full software supply chain with one application while improving confidence in its integrity and security. GitLab has put together a framework describing the various aspects that are required to accomplish this based on feedback from customers, inspiration from common standards (such as SLSA), as well as thought leadership from industry analysts. We would love your thoughts and contributions to these epics. Check out our [Software Supply Chain Security direction page](/direction/supply-chain/).\n\n**Inline security training** – Developers will have just-in-time access to popular third-party security training as they encounter vulnerabilities. For instance, if a vulnerability is detected, a module will pop up that the developer can click on to learn more, including what the vulnerability is and how to fix it. This optimizes security training with an immediate need. More details coming soon.\n\n**Intelligent code security** – Leveraging a previous acquisition, GitLab plans to help organizations automatically detect and remediate insecure coding practices using [machine learning](/direction/modelops/ai_assisted/#categories). This will help our customers further reduce risk and technical debt.\n\nGitLab is uniquely transparent. By making our product roadmaps public, we encourage contribution and iteration. 
We invite you to contribute your ideas by checking out our [product directions pages](/direction/#job-to-be-done) and commenting on [upcoming releases](/upcoming-releases/).\n",[9,875,916],{"slug":4209,"featured":6,"template":686},"gitlabs-newest-continuous-compliance-features-bolster-software","content:en-us:blog:gitlabs-newest-continuous-compliance-features-bolster-software.yml","Gitlabs Newest Continuous Compliance Features Bolster Software","en-us/blog/gitlabs-newest-continuous-compliance-features-bolster-software.yml","en-us/blog/gitlabs-newest-continuous-compliance-features-bolster-software",{"_path":4215,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4216,"content":4222,"config":4227,"_id":4229,"_type":14,"title":4230,"_source":16,"_file":4231,"_stem":4232,"_extension":19},"/en-us/blog/gitops-as-the-evolution-of-operations",{"title":4217,"description":4218,"ogTitle":4217,"ogDescription":4218,"noIndex":6,"ogImage":4219,"ogUrl":4220,"ogSiteName":670,"ogType":671,"canonicalUrls":4220,"schema":4221},"GitOps viewed as part of the Ops evolution","Examine the evolution that led to GitOps","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682062/Blog/Hero%20Images/food-train.jpg","https://about.gitlab.com/blog/gitops-as-the-evolution-of-operations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps viewed as part of the Ops evolution\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-07-12\",\n      }",{"title":4217,"description":4218,"authors":4223,"heroImage":4219,"date":4224,"body":4225,"category":791,"tags":4226},[1356],"2021-07-12","\n\nGitOps is a hot topic in the world of operations, but what does it provide to workflows that we didn’t have already? 
Looking at the evolution of the operations space, there have been many big changes in the past 20 years, and I argue that [GitOps](/topics/gitops/) is not a change, rather a summary of best practices. So, how do we describe the major phenomenon in Ops over the past 20 years? \n \nBefore 2000, the primary approach to operations was to hire a System Administrator or empower Lead Developers to do Ops work. System Administrators knew a lot about networking and server optimisations, and a good sysadmin can do most of their work through code, using Bash, Perl or Python scripts. While every software engineer likely knows at least the basics of shell scripting, even many backend engineers would not be comfortable with the level of bash scripting needed in traditional IT. \n \nBesides bash, there were situations where the infrastructure was managed through graphical user interfaces. Most enterprise IT software shipped with some level of graphical UI. This area was particularly alien to software developers. I first worked as a system administrator at a student house in Hungary. We used Novell tools to manage our network, create backups, and set up workstations. To be successful, I had to learn a lot about the tools and the domain, while my programming skills were pretty minimal.\n \nToday, a cloud-native \"system administrator\" does their job primarily through API calls. The APIs are triggered through some infrastructure as code approaches. Thus, even the sysadmins of today require much more advanced coding skills than they needed 20 years ago. Moreover, codefying your infrastructure enables battle-tested software development best practices, like testing, be introduced in operations, too. \n\nThis is a huge change compared to where we were 20 years ago. 
What has changed that got us to where we are now and how does it relate to GitOps?\n\n## The story\n \n\u003Ciframe src='https://cdn.knightlab.com/libs/timeline3/latest/embed/index.html?source=1_ZqRL3FjiRWlwW0Nx6imkrDcCbQtiFV4tJvR1JLiy3s&font=Default&lang=en&initial_zoom=2&height=650' width='100%' height='650' webkitallowfullscreen mozallowfullscreen allowfullscreen frameborder='0'>\u003C/iframe>\n \n### The first signals at Google\n\nThe System Administrator era is the initial period where our story starts. As we move forward, the first milestone is in 2003. For our story, two notable events happened during 2003. First, [Google presented Borg](https://research.google/pubs/pub43438/), their internal container management system that later became [Kubernetes](/blog/gitlab-kubernetes-agent-on-gitlab-com/). Second, Google hired Benjamin Treynor, and the SRE approach started with his collaboration. Let's stop here for a minute to speak about the core aspects of the SRE approach!\n\n[Site Reliability Engineering (SRE)](https://sre.google/) is a software engineering approach to IT operations. Software engineers write software to reach a goal, there is likely a process around delivering the software that includes code reviews and tests, and there are success metrics attached to the delivered output. These success metrics in the context of SRE are called Service Level Indicators, and there are related Service Level Objectives and Service Level Agreements. By applying software engineering practices to operations, the reliability and scalability of the system can be better understood and improved. Moreover, the automations that emerge from the approach enable the development teams to be more efficient as they can often self-serve their requirements.\n \n### The public cloud\n\nLet’s continue our story. For many companies around the world, an important development was Amazon Web Services (AWS). AWS launched in 2006 with 3 services: S3, SQS and EC2. 
Together, these services enabled companies to switch to AWS or to start their business on [AWS infrastructure](/blog/deploy-aws/). Amazon's market share has made it the leading cloud provider today, and their name is coupled tight with public clouds. As increasing workloads migrated to the cloud, the way of operations had to adapt. \n \nIn past years, I've run many interviews with IT operations professionals and asked them about their [infrastructure as code (IaC) practices](/topics/gitops/infrastructure-as-code/). From these interviews, a very strong pattern emerged around IaC adoption. Companies usually switch to IaC as they move their infrastructure to the cloud. Simply, managing dozens of cloud services through a UI is very problematic, and managing them through a single codebase is much more convenient. Together with the move to the cloud, there is a strong push to improve operations practices, and move towards more automated approaches.\n \n### The appearance of DevOps\n\nWhile the struggles of software delivery were well-known by 2009, the SRE approach pioneered at Google was not as widely adopted. As agile started to be formalized in 2000, it seemed that we found a solution to the problem of delivering the built services in front of the user becoming more and more stringent. As a result of many discussions around this topic, Patric Debois coined the term DevOps in 2009. \n \n> DevOps describes the cultural changes required in order to enable high-quality service delivery. The core idea of DevOps is to create a well-oiled process around service delivery by setting shared goals and clear ownerships. The many approaches to DevOps are highlighted by [the 9 types presented as DevOps team topologies](https://web.devopstopologies.com/).\n \nJust like many agile techniques existed before agile was formalized, the SRE approach existed before the term DevOps came to be, and it can be considered an implementation of DevOps. 
There are just as many agile techniques as there are ways to implement DevOps. \n \n### Containers to drive the process\n\nIn 2013, several developments were made. O'Reilly published the first book on DevOps, and the operations space got a new tool - docker - which led the way to containerisation and changed our industry tremendously. Containerisation provides a standard way to ship software. Previously, engineers could build a Debian package or a Java jar file. Basically, every technology had its own packaging solution, and there are _many_ technologies. Containers provide a single, standard way to package an application, enabling both developers to own what happens inside the container and infrastructure teams to support developers to ship containers reliably and quickly into production.\n \nThe idea of containerisation solves another problem, that of stale resources. For a long time, operations had to start different servers for various workloads, dependencies of workloads had to be taken care of, and that led to stale servers and huge inefficiencies, but we did not have a good model around orchestrating the workloads. Apache Mesos was presented in 2009 and Docker Swarm in 2014, indicating innovation in this space. In 2014, Kubernetes was presented as the open source version of Google's Borg system, and it quickly became the leading solution in this area. When released, it already supported docker containers, provided declarative infrastructure management through the Kube API, and came with a reconciliation loop at its core. Basically, the end user describes the expected state and sends it to the system, and Kubernetes tries to reach and maintain that state. Using an API for cloud operations was not new any more, still describing what we want to see, instead of imperatively commanding the system to take specific actions is a novel approach. Moreover, this enables the system to self-heal, as it can always aim at reaching the desired state. 
Beside better resource utilisation, these are the core values of container orchestrators.\n\n### The summary is GitOps\n\nOur story slowly gets to its end in 2017 when the GitOps term was coined. GitOps provides a summary of what we had already without adding anything new to the picture. Even though the summary was known, this workflow did not have a name yet. The cultural changes required for modern IT operations are described by DevOps and shown in the SRE approach. Automation has been with us since the advent of continuous integration, and new tools like AWS, containers, and Kubernetes enabled it in operations too. Finally, Kubernetes provides a way for the system to take care of itself (more or less), and provides a self-healing aspect of automation. As Gene Kim wrote in the _Phoenix Project_, “The Second Way is about creating the right to left feedback loops”. Coupling this with storing all the code that describes our system in a versioned manner, applying them automatically through a well-defined process, and finally using a self-healing system is what we call GitOps. \n\n## What does it mean to you\n \nAt GitLab, our [vision](https://about.gitlab.com/direction/#vision) is to provide a single application for the whole DevSecOps lifecycle. As part of this, GitLab offers one of the leading CI automation tools, and our dedicated [Infrastructure as Code](https://docs.gitlab.com/ee/user/infrastructure) and [Kubernetes Management](https://docs.gitlab.com/ee/user/project/clusters/) enable best practice operations for modern ops teams. We understand that many services are run in legacy infrastructures, where automation is very problematic, and some companies do not have the resources or need to move to Kubernetes. As shown above, the canonical definition of GitOps is not feasible in these situations. 
Thankfully, the value of GitOps is minor compared to the value of a strong DevOps culture combined with the automation enabled by the target systems.\n \nAs a result, I encourage everyone to approach GitOps by understanding their current level of DevOps practices as GitOps will emerge naturally from following well-known practices in the DevOps area.\n \nCover image by [Sigmund](https://unsplash.com/@sigmund?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/evolution?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n## Read more on GitOps with GitLab: \n\n- [GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform](/blog/gitops-with-gitlab-infrastructure-provisioning/)\n\n- [Here's how to do GitOps with GitLab](/blog/gitops-with-gitlab/)\n\n- [How to use a push-based approach for GitOps with GitLab scripting and variables](/blog/how-to-agentless-gitops-vars/)\n\n- [GitOps with GitLab: Connect with a Kubernetes cluster](/blog/gitops-with-gitlab-connecting-the-cluster/)\n\n",[9,534],{"slug":4228,"featured":6,"template":686},"gitops-as-the-evolution-of-operations","content:en-us:blog:gitops-as-the-evolution-of-operations.yml","Gitops As The Evolution Of Operations","en-us/blog/gitops-as-the-evolution-of-operations.yml","en-us/blog/gitops-as-the-evolution-of-operations",{"_path":4234,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4235,"content":4241,"config":4246,"_id":4248,"_type":14,"title":4249,"_source":16,"_file":4250,"_stem":4251,"_extension":19},"/en-us/blog/gitops-next-big-thing-automation",{"title":4236,"description":4237,"ogTitle":4236,"ogDescription":4237,"noIndex":6,"ogImage":4238,"ogUrl":4239,"ogSiteName":670,"ogType":671,"canonicalUrls":4239,"schema":4240},"Is GitOps the next big thing in automation?","We polled our community on Twitter to ask about GitOps. 
Here is what we found.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681428/Blog/Hero%20Images/iac-gitops-blog-post_with-gl-logo.png","https://about.gitlab.com/blog/gitops-next-big-thing-automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Is GitOps the next big thing in automation?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-07-14\",\n      }",{"title":4236,"description":4237,"authors":4242,"heroImage":4238,"date":4243,"body":4244,"category":679,"tags":4245},[788],"2020-07-14","\n\nInfrastructure management isn’t a new problem. After all, AWS has been publicly available since 2006. While the software development lifecycle is mostly automated, infrastructure remains a largely manual process that requires specialized teams. Infrastructure needs to be elastic, and automation would make that a much easier process than it is today.\n\n[GitOps](/topics/gitops/) is an emerging technology term that could be the answer many infrastructure teams have been searching for. At its core, GitOps is a process that helps teams automate IT infrastructure through processes they already use in application development.\n\nIt’s a framework we’re excited about. 
Naturally, we took it to Twitter.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Where are YOU at with \u003Ca href=\"https://twitter.com/hashtag/GitOps?src=hash&amp;ref_src=twsrc%5Etfw\">#GitOps\u003C/a>?\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1277595216468418560?ref_src=twsrc%5Etfw\">June 29, 2020\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n## What is GitOps?\n\nWhat makes [GitOps](/solutions/gitops/) unique is that it’s not a single product, plugin, or platform. Before we dive into what we can learn from these results, let’s define what exactly GitOps _is_.\n\nAt GitLab, we define GitOps as this:\n\n>GitOps is an operational framework that takes [DevOps](/topics/devops/) best practices used for application development such as version control, collaboration, compliance, and CI/CD, and applies them to infrastructure automation.\n\nGitOps happens in the same version control system as application development, enabling teams to collaborate more in a central location while benefiting from all the [built-in features of Git](https://devops.com/an-inside-look-at-gitops/). Infrastructure teams that practice GitOps use configuration files stored as code ([infrastructure as code](/topics/gitops/infrastructure-as-code/)).\n\nInfrastructure teams then take IaC and make changes using [merge requests](/blog/future-merge-requests-realtime-collab/) (MRs). Once changes are reviewed and approved, they are deployed using a CI/CD pipeline. 
With infrastructure changes codified, repeatable, and traceable, it leaves less room for human error and gets everyone on the same page.\n\n>GitOps = IaC + MRs + CI/CD\n\nWe thought it would be interesting to reach out to our Twitter followers to see just how many people are exploring this framework, or maybe haven’t heard of it at all. Here’s what we gleaned from our poll.\n\n## 23.8% use GitOps today\n\nWhile we have to admit that GitLab followers are probably going to be a sophisticated group, numbers like this are still very encouraging. If almost a quarter of respondents are using this new framework, it tells us that GitOps is a viable way of automating infrastructure.\n\n## 10.6% plan to implement GitOps\n\nImplementing a new process can be difficult, even for the most organized teams. GitOps allows for greater collaboration, but that is not necessarily something that comes naturally. For infrastructure teams used to making quick, manual changes, this new process is a big departure. If more than 10% of respondents are looking to get started with GitOps, we can help them understand what goes into adopting the new framework.\n\n## 11.6% have looked but not committed to GitOps\n\nThis kind of “shopping cart abandonment” differs from the type we’re most familiar with, but it has some similarities. For those that have heard of GitOps, what prevented them from implementing it and what hurdles did they anticipate?\n\nGitOps principles can be applied to all types of infrastructure automation, including VMs and containers, and can be very effective for teams looking to manage [Kubernetes clusters](/solutions/kubernetes/). But there might be some confusion on whether Kubernetes is required for GitOps (it’s not). Still, over 11% of respondents are familiar with GitOps but may not understand how it can apply to them.\n\n## 54% haven’t explored GitOps yet\n\nSince GitOps is still emerging, it’s not surprising that more than half of the respondents haven’t explored it yet. 
GitOps is an exciting topic because it offers automation using many of the same tools organizations already use, but before committing to a brand new process, it’s important for organizations to know how it works.\n\nCollaboration is part of what makes DevOps so effective, and [GitOps brings that same spirit of code collaboration into the infrastructure provisioning process](/topics/gitops/gitops-gitlab-collaboration/). Managing infrastructure through the same version control system used for application development brings a new level of transparency across the entire organization.\n\nAs we continue to explore GitOps, information like this poll lets us know where the community is in the adoption of new processes. Could GitOps be the next big thing in automation?\n\nIf you’d like to learn more about GitOps and how it works, check out this panel with GitOps experts from [Weaveworks](https://www.weave.works), [HashiCorp](https://www.hashicorp.com), [Ansible](https://www.ansible.com), and GitLab where we discuss:\n\n*   How GitOps is changing the landscape of infrastructure management\n*   What successful GitOps looks like\n*   What teams need to get started on their GitOps journey\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nWatch GitLab's [GitOps expert panel](/why/gitops-infrastructure-automation/) webcast\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\n**Read more about infrastructure:**\n\n[Why GitOps should be the workflow of choice](/blog/why-gitops-should-be-workflow-of-choice/)\n\n[How to use GitLab and Ansible to create infrastructure as code](/blog/using-ansible-and-gitlab-as-infrastructure-for-code/)\n\n[How infrastructure teams use GitLab and Terraform for 
GitOps](/topics/gitops/gitlab-enables-infrastructure-as-code/)\n",[109,9,267],{"slug":4247,"featured":6,"template":686},"gitops-next-big-thing-automation","content:en-us:blog:gitops-next-big-thing-automation.yml","Gitops Next Big Thing Automation","en-us/blog/gitops-next-big-thing-automation.yml","en-us/blog/gitops-next-big-thing-automation",{"_path":4253,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4254,"content":4260,"config":4265,"_id":4267,"_type":14,"title":4268,"_source":16,"_file":4269,"_stem":4270,"_extension":19},"/en-us/blog/gitops-with-gitlab-using-ci-cd",{"title":4255,"description":4256,"ogTitle":4255,"ogDescription":4256,"noIndex":6,"ogImage":4257,"ogUrl":4258,"ogSiteName":670,"ogType":671,"canonicalUrls":4258,"schema":4259},"GitOps with GitLab: The CI/CD Tunnel","This is the fifth in a series of tutorials on how to do GitOps with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667236/Blog/Hero%20Images/Learn-at-GL.jpg","https://about.gitlab.com/blog/gitops-with-gitlab-using-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: The CI/CD Tunnel\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-01-07\",\n      }",{"title":4255,"description":4256,"authors":4261,"heroImage":4257,"date":4262,"body":4263,"category":791,"tags":4264},[1356],"2022-01-07","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. 
You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nIn this article, we will see how you can access a Kubernetes cluster using GitLab CI/CD and why you might want to do it even if you aim for [GitOps](/topics/gitops/).\n\n## Prerequisites\n\nThis post assumes that you have a Kubernetes cluster connected to GitLab using the GitLab Kubernetes Agent. If you don't have such a cluster, I recommend consulting the previous posts (linked above) to have a similar setup from where we will start today.\n\n## Meet the CI/CD Tunnel\n\nThe GitLab Kubernetes Agent is not just a GitOps tool that will enable pull-based deployments and be one more application to maintain beside the other 70 in your DevOps stack. The GitLab Kubernetes Agent aims to serve the GitLab vision of providing you a single application for the whole DevSecOps lifecycle. As a result, the Agent's goal is to provide an integrated experience with every relevant GitLab feature.\n\nWhat GitLab features does the Agent integrate with today?\n\n- GitLab CI/CD\n- Container network security\n- Container host security\n- Container scanning\n\nIn this post, we will focus on the GitLab CI/CD integration. Given the power and flexibility of GitLab CI/CD, the majority of our users have been using it for years successfully and, until the Agent appeared, they often had to manually script their cluster connections and deployments into it. If the previous setup sounds familiar, I recommend checking out the Agent's CI/CD integration features, the CI/CD tunnel. 
The CI/CD tunnel enables a cluster connection to be used from GitLab CI/CD, thus you need only minor adjustments to your existing setup, and will receive a GitLab supported component that we are continuously expanding to provide more and more integrations on top of it.\n\nThe CI/CD tunnel is always enabled in the project where you register and configure the Agent, and the given connection can be shared by other groups and projects, too. This way, a single connection can be reused throughout the organization to save on resource and maintenance costs.\n\nGitLab automatically injects the available Kubernetes contexts into the CI/CD runner environment's `KUBECONFIG`. As a result, you can activate a context and start using it without much setup.\n\n## How to configure the CI/CD tunnel\n\nAs already mentioned, the CI/CD tunnel is always enabled in the project where you register and configure the Agent. If you would like to use the tunnel in the same repository, no configuration is needed. If you would like to share the connection with other repositories, open your agent configuration file and add the following lines:\n\n```yaml\nci_access:\n   projects:\n   - id: path/to/project\n   groups:\n   - id: path/to/group\n```\n\nChange the placeholder paths here to your project or group path. Sharing a connection with a group enables access to all the projects within that group. Once you save the configuration file, you can turn your attention to your application project repository, and use the following job to list and select an agent:\n\n```yaml\ndeploy:\n   image:\n     name: bitnami/kubectl:latest\n     entrypoint: [\"\"]\n   script:\n   - kubectl config get-contexts \n   - kubectl config use-context path/to/agent-configuration-project:your-agent-name\n```\n\n## How to install GitLab integrated applications into your cluster\n\nAs an application of the above, let's install some applications into the cluster. 
As various GitLab features require applications in your cluster to be installed and configured for GitLab, GitLab provides a cluster management project template to help you get started. One can easily install these GitLab integrated applications into their clusters using this template. Let's see how to use it with the CI/CD tunnel and the Agent!\n\n### Create the cluster management project\n\nFirst, let's create a new GitLab project using the \"Cluster Management Project\" template. Open the [create new project from template page](https://gitlab.com/projects/new#create_from_template), search for \"GitLab Cluster Management\", and start a new project with that template.\n\nYou will receive a project that already contains quite a lot of things! It comes with a ready-made `.gitlab-ci.yml` file and [helmfile](https://github.com/roboll/helmfile) based setup for 11 applications that integrate with various GitLab functionalities. [Each application might require different configurations](https://docs.gitlab.com/ee/user/clusters/management_project_template.html#built-in-applications). You can read about these in the linked documentation.\n\nAs part of this article, we will install NGINX Ingress and GitLab Runners using the cluster management project.\n\n### How to share the CI/CD tunnel\n\nThis newly created project needs access to one of your clusters. Let's share an Agent's connection with this project as described above. Edit your agent configuration file and add:\n\n```yaml\nci_access:\n   projects:\n   - id: path/to/your/cluster/management/project\n```\n\n### Pick the right Kubernetes context\n\nThe CI/CD tunnel is already available from within your cluster management project. We tried to make it simple to start using a cluster connection without the need to edit `.gitlab-ci.yml`. 
For simple setups, you can just set a `KUBE_CONTEXT` environment variable with the path to and name of your agent.\n\nSet an environment variable under \"Settings\" / \"CI/CD\" / \"Variables\"\n\n![KUBE_CONTEXT variable setup](https://about.gitlab.com/images/blogimages/2022-01-07-gitops-with-gitlab-using-ci-cd/KUBE_CONTEXT_setting.png)\n\n### How to install NGINX Ingress\n\nWe are ready to install any of the supported applications using this agent connection! Let's start by installing NGINX Ingress as it does not require any application-specific configuration.\n\nIn your cluster management project, edit `helmfile.yaml` and uncomment the line that points to the `ingress` application. Commit the changes and wait for GitLab magic to happen!\n\nThis was really easy!\n\n### How to install GitLab Runner\n\nAs GitLab Runner is more integrated with GitLab, it needs a little bit of configuration. [The Runner should know](https://docs.gitlab.com/ee/user/infrastructure/clusters/manage/management_project_applications/runner.html#required-variables) where it can find your GitLab instance and needs a token to authenticate with GitLab.\n\nTo make it simple for you to install a Runner fleet, you can configure these as environment variables. By default the `CI_SERVER_URL` variable is used to specify the GitLab url. You can overwrite this if needed. For the token, you should create `GITLAB_RUNNER_REGISTRATION_TOKEN` as a masked and protected environment variable with the value of your Runner registration token. Feel free to use either a project or a group registration token.\n\nFinally, as with the Ingress installation, uncomment the related line in the `helmfile.yaml`.\n\n## The full potential of the cluster management project\n\nThe cluster management project you created is yours. Thus, you are free to change it, extend it, or get rid of it. 
In this section, I would like to share with you a few ideas of how you might benefit the most from it.\n\n### Did you move away from Helm v2 already?\n\nThe `.gitlab-ci.yml` file in the cluster management project has a job that supports users to upgrade their Helm v2 installations to v3. If you never had these applications installed through a cluster management project with Helm v2, then you don't need that job. Feel free to delete it from your CI yaml.\n\n### Extend the project with your own apps\n\nThe cluster management project is self-contained as is. You can add your own helm/helmfile based application setups to it. To get started, I recommend checking out the [helmfile](https://github.com/roboll/helmfile) README.\n\n### Stay up to date\n\nWe want you to own the cluster management project, so you can upgrade the applications independently of GitLab releases. Still, you might prefer to follow GitLab releases, too, as you can expect improvements to the cluster management project template. How can you do that?\n\nIf you followed the `kpt` based Agent installation setup, you know that `kpt` can check out a git subtree and merge local changes with upstream changes when you request an update. You can use `kpt` here, too! \n\nAs you manage the cluster management project, you can replace selected applications with their `kpt` checkouts. For example, you can start following the upstream template with:\n\n```bash\ncd applications\nrm -rf prometheus\nkpt pkg get https://gitlab.com/gitlab-org/project-templates/cluster-management.git/applications/prometheus prometheus\n```\n\nand update to the most recent version by running:\n\n```bash\nkpt pkg update applications/prometheus\n```\n\n## Recap\n\nAs we have seen in this article, the GitLab Kubernetes Agent provides way more possibilities than focused GitOps tools do. Besides supporting pull-based deployments, we support GitLab users with integrating into their existing CI/CD based workflows. 
Moreover, a Cluster Management Project template ships with GitLab that supplements the various GitLab integrations to simplify getting started with them.\n\n## What's next\n\nBuilding on our knowledge of the CI/CD tunnel, in the next article we will look into how to use Auto DevOps with the Agent.\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n\n\n\n\n\n",[1477,9,683],{"slug":4266,"featured":6,"template":686},"gitops-with-gitlab-using-ci-cd","content:en-us:blog:gitops-with-gitlab-using-ci-cd.yml","Gitops With Gitlab Using Ci Cd","en-us/blog/gitops-with-gitlab-using-ci-cd.yml","en-us/blog/gitops-with-gitlab-using-ci-cd",{"_path":4272,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4273,"content":4278,"config":4283,"_id":4285,"_type":14,"title":4286,"_source":16,"_file":4287,"_stem":4288,"_extension":19},"/en-us/blog/gitops-with-gitlab",{"title":4274,"description":4275,"ogTitle":4274,"ogDescription":4275,"noIndex":6,"ogImage":928,"ogUrl":4276,"ogSiteName":670,"ogType":671,"canonicalUrls":4276,"schema":4277},"GitOps delivery by connecting Kubernetes clusters to GitLab","This is the first in a seven-part series on GitOps using GitLab's DevOps Platform.","https://about.gitlab.com/blog/gitops-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Here's how to do GitOps with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-10-21\",\n      }",{"title":4279,"description":4275,"authors":4280,"heroImage":928,"date":1494,"body":4281,"category":791,"tags":4282},"Here's how to do GitOps with GitLab",[1356],"\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. 
These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nThis post provides an overview of the series, and will provide a bit of context around GitOps, [Infrastructure as Code](/topics/gitops/infrastructure-as-code/), and related notions.\n\n## Start with the buzzwords\n\nThe DevOps industry is changing at a very fast pace, and there are plenty of new ideas popping up around this transformation. What are these? Let’s look into the following concepts and why they matter: DevOps, site reliability engineers (SRE), GitOps, Infrastructure as Code, and containers.\n\nThe term DevOps was coined by Patrick Debois in 2009. DevOps is a cultural approach, not a technology or a set of processes. At its core there are a few principles such as continuous learning, fast feedback loops and a clear flow of work. There is a strong connection between DevOps and SRE, as one can think of the SRE approach as a well-defined implementation of DevOps. Two important aspects of the SRE approach are codified infrastructure management and metrics. These enable the level of automation needed for feedback, and their central metrics (SLIs) are being moved to the left down to development teams too.\n\nWith the emergence of cloud computing, infrastructure can be managed fully through APIs. This gave rise to Infrastructure as Code or IaC. IaC means infrastructure engineers almost never have to click through a provider’s UI to configure a new user or a resource. 
IaC approaches can be used to configure GitLab itself or to allow GitLab to configure a 3rd party system (such as creating a cluster or managing databases).\n\n[GitOps](/topics/gitops/) is the new kid on the block here, and it basically summarizes the current state of our industry. IaC projects likely store their code in version-controlled ways, probably in git. They might even be automated through pipelines, and the resulting infrastructure might have good observability built into the whole stack. So, what does GitOps bring to the table? It brings us two things. First, GitOps wants to avoid drift using a reconciliation loop that automatically “fixes” the infrastructure if it deviates from the codified state found in the IaC repository. Whether this is feasible and how this is done is still a debated question. At the same time, the rise of declarative infrastructure popularized by Kubernetes makes this a compelling approach to many. The second benefit of GitOps is the \"declarative\" ability. By being declarative, the desired state of the infrastructure is described in the git repo. This simplifies complexity in provisioning as the end-system is tasked by setting up the described infrastructure. Contrast this with an imperative setup where the administrators have to codify the exact steps of setting up the infrastructure.\n\nContainers are mentioned here for a single reason: Once we get to deployments, I am going to focus on containerized applications only. Containers have already proved to be a great layer of abstraction for application delivery.\n\nYou can [read more about the evolution of DevOps](/blog/gitops-as-the-evolution-of-operations/) and how we got to GitOps as part of this evolution.\n\n## The series overview\n\n**Infrastructure provisioning with GitLab and Terraform**: My next post in the series will outline how to use GitLab to provision infrastructure. In this post I will use a GitLab project to create an EKS cluster following IaC best practices. 
To do this I will use Terraform, as Terraform is considered to be the de facto standard in infrastructure provisioning, and GitLab has strong built-in support for it.\n\n**Connecting GitLab with a Kubernetes cluster - Quickstart**: This post will show how one can quickly connect a cluster with GitLab using our recommended way, the GitLab Agent for Kubernetes. As this is a quickstart, this approach does not use all the GitLab IaC recommendations. Nevertheless it is a great start that we can build upon later. This post will outline the different approaches for connecting a cluster to GitLab, including our recommended approach.\n\n**Secrets management with GitLab**: In the third post, I will deploy a simple “secrets as code” solution into our cluster and set it up for future use. This will demonstrate how third-party services can easily be deployed and managed with GitLab. Moreover, this specific tool will be used in the subsequent post where we migrate from the quickStart cluster connection to a self-managing, IaC connection.\n\n**Managing the cluster connection from code**: In the second post, we created a GitLab-connected cluster, but there we either need to manage the cluster from our local CLI or need to do some CI magic. Now I will demonstrate how to build out a more robust management for the cluster connection. We set up the cluster connection to manage itself using a pull-based approach.\n\n**Integrate the cluster into GitLab**: As GitLab is not just an SCM and CI tool, but the complete DevOps Platform, it has robust monitoring and security integrations with Kubernetes. In this post I am going to show how one can use the GitLab-provided cluster management application on top of our cluster connection, and install NGINX, Cilium, and custom runners with minimal effort, in an IaC style.\n\n**Application deployment with Auto DevOps**: The final post in the series will illustrate how business applications can be easily deployed into the cluster. 
I will focus on push-based deployments as many development teams might be familiar with pipelines, unlike the most recent pull-based approaches. At the same time, given the content from the previous posts, it should be possible to put together a pull-based deployment on top of Auto DevOps as well.\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n\n\n",[534,9,1477],{"slug":4284,"featured":6,"template":686},"gitops-with-gitlab","content:en-us:blog:gitops-with-gitlab.yml","Gitops With Gitlab","en-us/blog/gitops-with-gitlab.yml","en-us/blog/gitops-with-gitlab",{"_path":4290,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4291,"content":4296,"config":4303,"_id":4305,"_type":14,"title":4306,"_source":16,"_file":4307,"_stem":4308,"_extension":19},"/en-us/blog/gitpod-desktop-app-personal-activities",{"title":4292,"description":4293,"ogTitle":4292,"ogDescription":4293,"noIndex":6,"ogImage":928,"ogUrl":4294,"ogSiteName":670,"ogType":671,"canonicalUrls":4294,"schema":4295},"Why we built GitDock, our desktop app to navigate your GitLab activities","Life is full of moving parts. We get it. 
And that's why we created GitDock so you can keep track of all things GitLab right from your desktop.","https://about.gitlab.com/blog/gitpod-desktop-app-personal-activities","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we built GitDock, our desktop app to navigate your GitLab activities\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcel van Remmerden\"},{\"@type\":\"Person\",\"name\":\"Jeremy Elder\"}],\n        \"datePublished\": \"2021-10-05\",\n      }",{"title":4292,"description":4293,"authors":4297,"heroImage":928,"date":4300,"body":4301,"category":791,"tags":4302},[4298,4299],"Marcel van Remmerden","Jeremy Elder","2021-10-05","\n\nKeeping track of everything that is happening in your GitLab projects and groups can be quite overwhelming. Often times you care about not only one project, but multiple ones. Even worse, these projects might even belong to different groups, making everything more complex.\n\nAs an example, product designers at GitLab might work on all of these different projects over the course of just one week:\n\n- [gitlab-org/gitlab](https://gitlab.com/gitlab-org/gitlab) (our product)\n- [gitlab-com/www-gitlab-com](https://gitlab.com/gitlab-com/www-gitlab-com) (our handbook)\n- [gitlab-org/gitlab-design](https://gitlab.com/gitlab-org/gitlab-design/) (space for discussions)\n- [gitlab-org/gitlab-services/design.gitlab.com](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com) (our design system)\n- [gitlab-org/ux-research](https://gitlab.com/gitlab-org/ux-research) (research studies)\n\n## User-centric vs. 
project-centric navigation\n\nOne of our product design managers ([@jackib](https://gitlab.com/jackib)) created a visualization that shows the current project-centric navigation model that we have in place.\n\n![Project-centric navigation](https://about.gitlab.com/images/blogimages/2021-10-05-gitdock/project-centric-navigation.png)\n\nThis model puts the burden of keeping track of your activities and the work you care about on the user. We would rather look for opportunities where we can enable a more user-centric navigation.\n\n![User-centric navigation](https://about.gitlab.com/images/blogimages/2021-10-05-gitdock/user-centric-navigation.png)\n\n## Why do we care about this?\n\nUsers already have different ways to stay up to date, for example email notifications, our \"to-dos,\" or custom systems they have set up for themselves. However, when we ran a UX research study, we noticed these tools often times only show a small subset of the things that users are curious about or the tools have to be checked multiple times during the day.\n\nA short summary of the main points we learned from this study:\n\n- Maintainers care about what happened to their project since they last looked at it.\n- Users repeatedly check their pipelines to see the results.\n- Often times users need to jump back into issues/MRs they have recently contributed to.\n\n## What is GitDock?\n\nGitDock is a desktop app you can install on your macOS/Windows/Linux machine (download [latest release](https://gitlab.com/mvanremmerden/gitdock/-/releases)). 
When installed, you will have an icon on your menu bar that brings up a small window.\n\n![GitDock](https://about.gitlab.com/images/blogimages/2021-10-05-gitdock/gitdock-window.png)\n\nFrom there you will have direct access to the following information:\n\n- The last pipelines you triggered\n- Your recently viewed GitLab objects (MRs, Issues, Epics, etc...)\n- Favorite projects\n- Your most recent comments\n- Bookmarked items\n\nGitDock also sends you a system notification whenever a pipeline completes, or when a new to-do was created for you.\n\nAll of these features try to put the user at the center. You can see me walk through all functionality in this overview video:\n\n[![YouTube video](https://about.gitlab.com/images/blogimages/2021-10-05-gitdock/gitdock-youtube.png)](https://www.youtube.com/watch?v=WkVS38wo4_w)\n\nYou can also see the entire code in our [GitDock](https://gitlab.com/mvanremmerden/gitdock) project and download the [newest release for your machine](https://gitlab.com/mvanremmerden/gitdock/-/releases). \n\n## Why didn't we make this part of our Web UI?\n\nThe main goal for GitDock is to help us learn how users want to navigate in this more user-centric approach. We decided to build this [minimum viable change (MVC)](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) in a separate product as it allowed us to move faster and use a few shortcuts, e.g. relying on the local browser history for the recently viewed items instead of storing these in our database. It also permitted us to cut some corners on performance as our API is not yet optimized for this approach. Here's one example of how it's not optimized: getting the last pipeline you triggered requires three API calls to different endpoints.\n\nOne other advantage is that it gives us a space to test new ideas that we are curious about without having to fully commit to them (e.g. 
bookmarks).\n\n## What are the next steps?\n\nWe want to use the learnings and data from this project to help us [build a better start page for GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/225331). Right now this page is configurable and can show you different content, but almost 99% of users keep the default \"Your projects\" list as start page. We don't think users do this because it is truly the most useful option, and we want to create a better experience for this.\n\nThat's why we are still looking for feedback. Let us know what you think about GitDock and what other content would be helpful for you in a start page, or other navigation feature.\n",[728,3644,9],{"slug":4304,"featured":6,"template":686},"gitpod-desktop-app-personal-activities","content:en-us:blog:gitpod-desktop-app-personal-activities.yml","Gitpod Desktop App Personal Activities","en-us/blog/gitpod-desktop-app-personal-activities.yml","en-us/blog/gitpod-desktop-app-personal-activities",{"_path":4310,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4311,"content":4317,"config":4323,"_id":4325,"_type":14,"title":4326,"_source":16,"_file":4327,"_stem":4328,"_extension":19},"/en-us/blog/gke-gitlab-integration",{"title":4312,"description":4313,"ogTitle":4312,"ogDescription":4313,"noIndex":6,"ogImage":4314,"ogUrl":4315,"ogSiteName":670,"ogType":671,"canonicalUrls":4315,"schema":4316},"GitLab + Google Cloud Platform = simplified, scalable deployment","We’ve teamed up with Google Cloud Platform – here’s what that means for you.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671280/Blog/Hero%20Images/gitlab-gke-integration-cover.png","https://about.gitlab.com/blog/gke-gitlab-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab + Google Cloud Platform = simplified, scalable deployment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        
\"datePublished\": \"2018-04-05\",\n      }",{"title":4312,"description":4313,"authors":4318,"heroImage":4314,"date":4319,"body":4320,"category":299,"tags":4321},[1378],"2018-04-05","\n\nGet super-simple deployment for your app with GitLab and Google Cloud Platform (GCP): thanks to our integration with Google Kubernetes Engine (GKE), you can now get CI/CD and Kubernetes deployment set up with just a few clicks, and [$500 credit](#get-seamless-integration-with-gke-and-500-credit-for-your-project) to get you started.\n\n## Now everyone can get automatic code quality, security testing, and no-configuration deployment\n\nWith increasing adoption of [cloud native](/topics/cloud-native/) practices, the use of [microservices](/topics/microservices/) and containers has become critical to modern software development. Kubernetes has emerged as the first choice for container orchestration, allowing apps to scale elastically from a couple of users to millions. It's been possible to deploy to Kubernetes from GitLab for quite a while, but the process of setting up and managing everything was manual and time intensive.\n\nToday, we’re happy to announce we've been collaborating with Google to make Kubernetes easy to set up on GitLab. Now, with our native [Google Kubernetes Engine integration](/partners/technology-partners/google-cloud-platform/), you can automatically spin up a cluster to deploy applications, with just a few clicks. Simply connect your Google account, enter a few details, and you're good to go! GitLab will create the clusters for you. The clusters are fully managed by Google and run on Google Cloud Platform's best-in-class infrastructure.\n\nThis also means you can easily take advantage of GitLab [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/). This feature does all the hard work for you, by automatically configuring CI/CD pipelines to build, test, and deploy your application. 
To make use of Auto DevOps, it used to be necessary to have an in-depth understanding of Kubernetes, and you had to manage your own clusters. Not any more!\n\nWith the integration between GitLab and GKE, we’ve made it simple to set up a managed deployment environment on Google Cloud Platform and access our robust [DevOps capabilities](/topics/devops/). That’s all the benefits of fully automated code quality, security testing, and deployment, with none of the headache of managing and updating your clusters (Google does that all for you!). More than half of developers and 78 percent of managers in our [2018 Global Developer Report](/developer-survey/) agreed that automating more of the software development lifecycle is a top priority for their organization. We hope that this integration gives you a head start, by offering automation out of the box with Kubernetes and Auto DevOps.\n\n## What’s next for GitLab?\n\nWe’re not just excited about offering this integration for you to use, we’re excited to use it ourselves! We’re already in the process of migrating GitLab.com to Google Cloud Platform. For us, the primary reason to migrate was because it has the most mature Kubernetes platform. By moving, we get access to security functionality like default encrypted data at rest, a broad, ever-expanding list of localities served globally, and tight integration with our existing CDN for faster caching. Be on the lookout for more information on our migration as it progresses.\n\n## Get seamless integration with GKE and $500 credit for your project\n\nEvery new Google Cloud Platform account receives $300 in credit [upon signup](https://console.cloud.google.com/freetrial?utm_campaign=2018_cpanel&utm_source=gitlab&utm_medium=referral). In partnership with Google, GitLab is able to offer an additional $200 for new GCP accounts to get started with GitLab’s GKE integration. 
Here's a link to [apply for your $200 credit](https://cloud.google.com/partners/partnercredit/?pcn_code=0014M00001h35gDQAQ#contact-form).\n\n## Join Google and GitLab for a live demo\n\nOn April 26th, join Google’s [William Denniss](https://www.linkedin.com/in/williamdenniss/) and GitLab’s [William Chia](https://www.linkedin.com/in/williamchia/) for a walkthrough of the new GKE integration. You’ll learn how easy it is to set up a Kubernetes cluster, how to deploy your app using GitLab CI/CD, and how GKE enables you to deploy, update, and manage containerized applications at scale.\n\n[Register today](/webcast/scalable-app-deploy/)!\n",[4322,3232,231,1477,9],"GKE",{"slug":4324,"featured":6,"template":686},"gke-gitlab-integration","content:en-us:blog:gke-gitlab-integration.yml","Gke Gitlab Integration","en-us/blog/gke-gitlab-integration.yml","en-us/blog/gke-gitlab-integration",{"_path":4330,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4331,"content":4336,"config":4341,"_id":4343,"_type":14,"title":4344,"_source":16,"_file":4345,"_stem":4346,"_extension":19},"/en-us/blog/gl-for-pm-prt-2",{"title":4332,"description":4333,"ogTitle":4332,"ogDescription":4333,"noIndex":6,"ogImage":3657,"ogUrl":4334,"ogSiteName":670,"ogType":671,"canonicalUrls":4334,"schema":4335},"2 Examples of how marketing uses GitLab to manage complex projects","How GitLab technology powers integrated marketing campaigns and product marketing projects.","https://about.gitlab.com/blog/gl-for-pm-prt-2","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"2 Examples of how marketing uses GitLab to manage complex projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-12-11\",\n      }",{"title":4332,"description":4333,"authors":4337,"heroImage":3657,"date":4338,"body":4339,"category":791,"tags":4340},[2002],"2019-12-11","\n\n_In [part one of this 
series](/blog/gitlab-for-project-management-one/) we looked at the pervasive problems around collaboration and how GitLab was built to resolve those challenges both in and out of the software development space. In this second part we take a detailed look at how our marketing teams used GitLab for project management._\n\nWhen we jumped in to using GitLab for project management, we did it in a big way. The [Just Commit marketing campaign](https://gitlab.com/groups/gitlab-com/marketing/-/epics/7) which launched in January 2019 is a good example of how the marketing team uses GitLab features like issues and epics.\n\n\"It was our first integrated campaign, and if you're not familiar with what that means, it's basically landing a single message across all channels,\" says [Jackie Gragnola](/company/team/#jgragnola), marketing programs manager. “So using social media, digital marketing, all of our content, our website. and in doing so, it was involving a lot of different team members.\"\n\nSince there were so many stakeholders involved, it was unrealistic that something like a Google Doc could provide the infrastructure necessary for efficient and transparent collaboration. Jackie migrated her kick-off document from Google Docs over to GitLab. 
\"It was the first test into using epics to give the high-level information and then organize the group into a single unified vision for what this campaign would become,\" she explains.\n\n![justcommit-integratedcampaign](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management2/justcommit_integratedcampaign.png){: .shadow.large.center}\nThe Just Commit integrated campaign epic included the JustCommit label, as well as campaign goals, personas the campaign is targeting, links to recorded meetings, and more.\n{: .note.text-center}\n\nThe Just Commit ancestor epic also included details such as [UTM tracking links](https://gitlab.com/groups/gitlab-com/marketing/-/epics/7#utm-for-tracking-urls), a [list of teams and DRIs involved in the campaign](https://gitlab.com/groups/gitlab-com/marketing/-/epics/7#teams-involved-roles-responsibilities), and a [timeline of key dates and deliverables](https://gitlab.com/groups/gitlab-com/marketing/-/epics/7#key-timeline-dates) in the lead-up to the Feb. 18, 2019 launch.\n\nA level below the ancestor epic are child epics, which were organized by areas of action items. Some examples include organic search, webcasts, emails, and events; messaging and positioning, etc.\n\n![justcommit-child epics](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management2/jc-childepics.png){: .shadow.large.center}\nExamples of some of the child epics for the Just Commit integrated campaign.\n{: .note.text-center}\n\nThe Just Commit label that was created was tagged to issues related to the campaign. It is simple enough to get a high-level overview of what issues are related to the Just Commit campaign by searching for the label.\n\nIn order to dig deeper into the different categories of work, you’d look at the issue list within the different child epics. 
The issue list functions essentially as a list of what needs to get done, and provides a good overview of what’s left to accomplish on the list.\n\n![justcommit-issue list](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management2/jc-strategy-and-design.png){: .shadow.large.center}\nThis is an example of the issue list from the strategy and design child epic.\n{: .note.text-center}\n\nInside each issue is a DRI and a due date. The due dates were important not just to stay ahead of deadline, but also because there were a lot of dependencies baked into the integrated campaign.\n\n\"We couldn't work on the content until we knew what the message was, and we couldn't work on anything related to digital marketing until we had the designs approved,\" says Jackie. \"So, this just kept us organized by saying what we needed to get done by what dates and kept us up-to-date on the timeline that would help us hit that delivery date.\"\n\nBy using GitLab features such as ancestor epics, child epics, issues, and labels, the Just Commit integrated campaign kept all stakeholders updated on their progress and accountable for their deliverables.\n\n## How product marketing uses GitLab\n\n[Tye Davis](/company/team/#davistye) is a technical marketing manager and he uses GitLab for managing product marketing projects.\n\n### Use issue boards to get a global overview of work\n\nTye works primarily within the [product marketing project](https://gitlab.com/gitlab-com/marketing/product-marketing), which is housed in the broader marketing group. Just like we saw in the Just Commit integrated campaign, there are various ancestor epics, child epics, and issues housed within this project.\n\nThe [issue board view](https://docs.gitlab.com/ee/user/project/issue_board.html) is a useful way to visualize and organize all the issues and activity happening within a specific group or project. 
Viewing an issue board is simple enough: Just select boards under the issues tab to see all of the issues within a specific group, or to narrow the scope select a specific project. But building one is another matter entirely.\n\nIt is important to think strategically about the level at which you build your issue board, because that will impact how much information is rolled up into the board.\n\n\"You have to think about where your work lies and where you should be building your issue boards in epics,\" says [JJ Cordz](/company/team/#jjcordz), senior marketing ops manager. \"As an example, in marketing ops we presently work across departments so we do a lot of with sales ops, biz ops, sales in general, and all of those are individual projects and groups. So our issue board is actually built at this highest level (i.e., marketing group level) because we need to pull in everything else.\"\n\nBut not every team is as integrated as marketing ops. Sometimes building an issue board at the team level, instead of the group or project level, makes the most sense for your workflow.\n\nThe [technical marketing team has its own issue board](https://gitlab.com/gitlab-com/marketing/product-marketing/-/boards/926375?&label_name[]=tech-pmm), and it is sorted by labels. The labels it uses are uniform across the marketing group to indicate the status of a particular issue – `status: plan`, `status: WIP`, `status: scheduled`, or `status: review`. The labels automatically change when a particular issue is dragged between label lanes.\n\nThe use of these labels and the different team boards that live within the product marketing group allows anyone to take a look at the status of both individual issues and larger projects.\n\n### Team boards\n\nAnother option to configure an issue board is to base it on teams and sort based on an assignee. The team board view sorted by assignee allows you to see what each team member is working on.\n\n“We create boards based on assignee. 
This allows us to see who has what issue and what they're working on,\" says Tye. “Maybe your manager just wants to see what the team's working on or you're being a collaborative Agile team and want to just see what everyone's doing or what you could work on together.\"\n\n### Tracking progress\n\nThere are two main options for measuring work progress from a project management perspective: [milestones](https://docs.gitlab.com/ee/user/project/milestones/#project-milestones-and-group-milestones) and [burndown charts](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html).\n\nMilestones are time-bound and track work output based on a specific timeframe (e.g., Q1 FY20 – a four-month period). When creating an issue, you can assign it to a specific milestone.\n\nBurndown charts reflect all the issues that are completed within the specific milestone. Once the time period (e.g., Q1 FY20), is up, you move any remaining and new work over to the next milestone (e.g., Q2 FY 2020).\n\n### Relating to GitLab customers\n\nWhile the marketing team and other teams across the company use GitLab as a project management tool, the majority of our customers are engineers that use GitLab as an Agile planning tool for developing code.\n\nWe can still relate to our customers through our use of issues and merge requests to make changes to the handbook, publish blog posts, among other activities in different repositories within GitLab.\n\nWhether you’re an infrastructure engineer, product marketing manager, or even an editor for the GitLab blog, the GitLab product functions as a sophisticated and customizable project management tool where collaboration and efficiency are baked into the function and design.\n\nWatch the video from [GitLab Contribute](/events/gitlab-contribute/) in New Orleans to see an overview of how GitLab can be used for project management, plus more on using GitLab for integrated campaigns and product marketing.\n\n\u003C!-- blank line 
-->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/tbg8KSyIWVg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [Startaê Team](https://unsplash.com/@startaeteam) on [Unsplash](https://unsplash.com/s/photos/sticky-notes).\n{: .note}\n",[9,683,749],{"slug":4342,"featured":6,"template":686},"gl-for-pm-prt-2","content:en-us:blog:gl-for-pm-prt-2.yml","Gl For Pm Prt 2","en-us/blog/gl-for-pm-prt-2.yml","en-us/blog/gl-for-pm-prt-2",{"_path":4348,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4349,"content":4355,"config":4361,"_id":4363,"_type":14,"title":4364,"_source":16,"_file":4365,"_stem":4366,"_extension":19},"/en-us/blog/global-developer-report",{"title":4350,"description":4351,"ogTitle":4350,"ogDescription":4351,"noIndex":6,"ogImage":4352,"ogUrl":4353,"ogSiteName":670,"ogType":671,"canonicalUrls":4353,"schema":4354},"2019 Global Developer Report: security roadblocks hit teams","Over 4,000 software professionals shared their DevOps experiences, helping us uncover what they require in order to innovate rapidly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672611/Blog/Hero%20Images/2019-global-developer-report-blog.png","https://about.gitlab.com/blog/global-developer-report","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"2019 Global Developer Report: DevSecOps finds security roadblocks divide teams\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-07-15\",\n      }",{"title":4356,"description":4351,"authors":4357,"heroImage":4352,"date":4358,"body":4359,"category":679,"tags":4360},"2019 Global Developer Report: DevSecOps finds security roadblocks divide teams",[702],"2019-07-15","\nWe have liftoff! The 2019 Global Developer Report: DevSecOps has arrived! 
Thanks to the 4,071 crew members – across various industries, roles, and geographic locations – we’ve uncovered what helps and hurts software professionals on the journey to bring developers, security professionals, and operations team members together.\n\nAccording to our survey respondents, the primary mission for all software professionals today is improvement.  Everyone wants more secure code, increased visibility, reduced cycle times, and continuous deployment, but how do teams get there? Based on our survey results, DevOps done right can help realize these goals. But DevOps itself can be challenging to implement, creating other difficulties.\n\nHere are a few key takeaways from the survey that might help you create a more nuanced and strategic DevOps flight plan for your organization.\n\n## Good DevOps: The answer to security problems?\n\nSecurity teams in a longstanding DevOps environment reported they are 3 times \nmore likely to discover bugs before code is merged and 90% more likely to test \nbetween 91% and 100% of code than teams who encounter early-stage DevOps. Nearly \nhalf of all mature DevOps respondents practiced continuous deployment in at least \nsome part of their organizations. But at the same time, only about a third of \nrespondents actually rated their organizations’ DevOps efforts as “good.”\n\n> “The big takeaway from this survey is that early adopters of strong DevOps \nmodels experience greater security and find it easier to innovate, but barriers \nstill prevent developers and security teams from achieving true DevSecOps,” said \nSid Sijbrandij, CEO and co-founder of GitLab. “Teams need a single solution that \ncan provide visibility into both sides of the process for streamlined deployment.”\n\nClearly challenges remain, and nowhere is that more obvious than in security. 
\nWhile 69% of developers indicate they’re expected to write secure code, nearly \nhalf of security pros surveyed (49%) said they struggle to get developers to make \nremediation of vulnerabilities a priority. And 68% of security professionals feel \nthat fewer than half of developers are able to spot security vulnerabilities \nlater in the lifecycle. Roughly half of security professionals said bugs were \nmost often found by them after code is merged in a test environment.\n\n![2019 Developer Report security findings](https://about.gitlab.com/images/blogimages/security-vulnerabilities.png){: .large.center}\n\n## Choosing DevOps\n\nMore companies are making the move to DevOps than before, and for good reason – \nteams that have successfully implemented a mature [DevOps model](/solutions/security-compliance/) experience major \nimprovements in their workflow. According to the survey, developers who work at \norganizations with immature DevOps models feel their processes inhibit them, \nwhile those who work with mature models are almost 1.5 times more likely to feel \ninnovative and 3 times more likely to discover security vulnerabilities earlier \non in the pipeline.\n\nPoor DevOps practices slow teams down. Those organizations are 2.5 times more \nlikely to encounter significant delays during the planning stage and 2.6 times \nmore likely to wade through red tape, slowing efforts to quickly fix security \nvulnerabilities.\n\n## Remote work works\n\nAccording to our survey respondents, working remotely leads to greater \ncollaboration, better documentation, and transparency. In fact, developers in a \nmostly remote environment are 23% more likely to have good insight into what \ncolleagues are working on and rate the maturity of their organization’s security \npractices 29% higher than those who work in a traditional office environment.\n\n## About the survey\n\nGitLab surveyed 4,071 software professionals across various industries, roles,\nand geographic locations. 
The margin of error is 2%, assuming a population size\nof 23 million software professionals and a 95% confidence level.\n\n## Methodology\n\nWe launched a Global Developer Survey on Jan. 23, 2019, collecting responses\nuntil Feb. 27, 2019. During that time, we promoted the survey primarily on GitLab’s\nsocial media channels and newsletter.\n\n### Frequently asked questions\n\n| -------- | -------- |\n| **How can I read the report?**   | You can [download the full report here](/developer-survey/).   |\n| **Are the raw results publicly available?**  | Yes, you can [view the raw data here](https://www.surveymonkey.com/results/SM-8LLKL2N87/).   |\n| **Did only GitLab users take the survey?** | No, it was open to all software professionals across various industries, roles, and geographic locations.  |\n| **How can I ask questions or give feedback about the survey and results?** | Please direct questions or comments about the survey to surveys@gitlab.com. |\n| **I’d like to participate in the next survey. Can I sign up for alerts?** | The best way to receive news about the Global Developer Survey is to [sign up for our bi-weekly newsletter](/company/preference-center/). 
|\n",[681,9,726],{"slug":4362,"featured":6,"template":686},"global-developer-report","content:en-us:blog:global-developer-report.yml","Global Developer Report","en-us/blog/global-developer-report.yml","en-us/blog/global-developer-report",{"_path":4368,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4369,"content":4375,"config":4379,"_id":4381,"_type":14,"title":4382,"_source":16,"_file":4383,"_stem":4384,"_extension":19},"/en-us/blog/going-virtual-with-all-day-devops",{"title":4370,"description":4371,"ogTitle":4370,"ogDescription":4371,"noIndex":6,"ogImage":4372,"ogUrl":4373,"ogSiteName":670,"ogType":671,"canonicalUrls":4373,"schema":4374},"Going virtual with All Day DevOps","The real value of virtual conferences.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671730/Blog/Hero%20Images/meeting_image.jpg","https://about.gitlab.com/blog/going-virtual-with-all-day-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Going virtual with All Day DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily Kyle\"}],\n        \"datePublished\": \"2018-10-16\",\n      }",{"title":4370,"description":4371,"authors":4376,"heroImage":4372,"date":3268,"body":4377,"category":679,"tags":4378},[3169],"\n\nIn my role, I am very fortunate to get the opportunity to attend many events throughout the year. Every conference is another opportunity to learn from thought leaders and others in the industry. The real value in these events is the knowledge share that takes place, yet I find myself frustrated every time. I try to be a sponge absorbing all the information to share all the learnings with my team, but inevitably things get lost as more time passes and we all get back to our day-to-day.  
\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://player.vimeo.com/video/290793305\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nLuckily, All Day DevOps is different. It’s the largest DevOps conference of the year, and it’s 100% free and virtual. Anyone, from anywhere around the world, can register and tune in for 24 hours on October 17. In fact, about 200 people from our company have already registered. As a fully remote company, fully virtual events are particularly important to our team as they level the information playing field and allow everyone on our team in all 40 countries to participate and gain value.\n\nThose attending will be able to listen in on over 100 sessions from some of the industry’s brightest minds, and then ask them anything in Q&As on Slack. The other nice thing: zero vendor pitches are allowed -- a mainstay of the All Day DevOps community.\n\nThis year’s conference will feature 5 tracks this year: CI/CD, DevSecOps, Cloud Native Infrastructure, SRE, and Cultural Transformations.\n\n[Speaker](https://www.alldaydevops.com/addo-speakers) highlights include talks by:\n\n* Cindy Healy, her code sits on another planet inside the Mars Pathfinder\n* David Rensin, founder of Customer Reliability Engineering (CRE) at Google\n* George Swan, Director of Engineering Solutions at Autodesk\n* Priyanka Sharma, Director of Cloud Native Alliances at GitLab\n\nAfter you [register](https://www.alldaydevops.com/register) yourself, encourage your entire department to register.  
After all, DevOps done right is a team sport.\n",[9,277],{"slug":4380,"featured":6,"template":686},"going-virtual-with-all-day-devops","content:en-us:blog:going-virtual-with-all-day-devops.yml","Going Virtual With All Day Devops","en-us/blog/going-virtual-with-all-day-devops.yml","en-us/blog/going-virtual-with-all-day-devops",{"_path":4386,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4387,"content":4393,"config":4398,"_id":4400,"_type":14,"title":4401,"_source":16,"_file":4402,"_stem":4403,"_extension":19},"/en-us/blog/goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies",{"title":4388,"description":4389,"ogTitle":4388,"ogDescription":4389,"noIndex":6,"ogImage":4390,"ogUrl":4391,"ogSiteName":670,"ogType":671,"canonicalUrls":4391,"schema":4392},"Goldman Sachs partners with GitLab for next-gen platform strategies","Goldman Sachs’ George Grant shares how partnering with GitLab has modernized the development ecosystem.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671845/Blog/Hero%20Images/serverless-ops-blog.jpg","https://about.gitlab.com/blog/goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Goldman Sachs partners with GitLab for next-gen platform strategies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2020-01-24\",\n      }",{"title":4388,"description":4389,"authors":4394,"heroImage":4390,"date":4395,"body":4396,"category":679,"tags":4397},[1036],"2020-01-24","\n\nMost people know Goldman Sachs as the global investment banking giant, but over the past few years the company has branched out to some pretty modern applications that go beyond the standard financial firm. 
At GitLab Commit Brooklyn 2019, [George Grant](https://www.linkedin.com/in/george-grant-21a9624), who runs the US SDLC engineering team at Goldman Sachs, explained how they’ve partnered with GitLab to help transform not only their development but the company as a whole.\n\n“It means we have to be a lot more nimble than we were in the past,” Grant says. “Now that we’re developing things that run on people’s iPhones, you need to have a different sort of infrastructure to do that.” The SDLC engineering team drives strategies for the development team, including legacy products, but also newer platforms like budgeting applications and the latest Apple credit card. The team is at the center of every business move within the organization.\n\n## Getting past the “dark times”\nGolman Sachs has about 10 [SDLCs running](/platform/), having grown organically into its own ecosystem over the years for various purposes. “Many of the things that we have at GS were designed in house – its our own workflow, our own tools doing code reviews, surrounding a minimum amount of external tools. Everthing thats involved in it is very tightly coupled with everything else,” Grant says.\n\nThe deployments, the issue tracker, the builds, and the testing are all linked together in order for everything to be controlled in one environment, including regulatory and compliance. This workflow is comfortable and controlled for users, but not ideal. “The problem is, it is sort of simultaneously its greatest strength and greatest weakness because the tightness of the coupling of the components makes it very difficult to replace any of the ones,” Grant says. If any part of the environment needs to be updated or switched out, it impacts all the others.\n\n\n\nThe engineering team started researching a new strategic direction, primarily looking for a modern Git-based solution. 
The goal was to find a tool that could alleviate developers’ SDLC workload and provide critical strategies for [cloud and Kubernetes](/2017/11/30/containers-kubernetes-basics/), allowing people to move away from the legacy stack. “You actually want to have something that gives you the freedom to innovate, but still have that control level around it.”\n\n## Creating a roadmap with GitLab\nGoldman Sachs chose GitLab as a way to move to the cloud, as an automation tool and to ultimately become the center of the ecosystem. “We didn’t want GitLab to be an island,” Grant says. Within the first two weeks of introducing GitLab, there were over 1600 users, underscoring the push for a new strategic platform.\n\nGitLab users can be innovative without restrictions. Each user group continues to work in their own world of tooling, but in a highly regulated environment. Reduced cycle times are another benefit, according to Grant. “We have one team that used to only be able to do a release every two weeks. Now they can do one and do another one five minutes later if they want to,” he says.\n\nFor an experienced company, the ability to integrate with legacy tools is important. On top of that, GS is embracing DevOps and QA metrics now that they have end-to-end visibility within the ecosystem. The transparency of GitLab allows Goldman Sachs to have input. “We have new ideas and new ways that we want to use the product to drive it strategically within GS,” Grant says.\n\n## Goldman Sachs and GitLab: Better together\nGoldman Sachs and GitLab have established a partnership. “The proof is in the pudding, as they say, and Goldman Sachs was very, very happy to become an investor in GitLab,” Grant says. As users of the tool, Goldman Sachs found it to be a natural investment opportunity. Bottom line, he says, people are demanding to use it more often. 
“We believe it is the strategic platform to take us into the future.”\n\nTo learn more about Goldman Sach’s implementation strategies, watch George Grant’s presentation from GitLab Commit Brooklyn 2019.\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Bu3nrxPy1-E\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nPhoto by [Tomasz Frankowski](https://unsplash.com/@sunlifter?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[1829,109,9,1789,267],{"slug":4399,"featured":6,"template":686},"goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies","content:en-us:blog:goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies.yml","Goldman Sachs Partners With Gitlab For Next Gen Platform Strategies","en-us/blog/goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies.yml","en-us/blog/goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies",{"_path":4405,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4406,"content":4411,"config":4416,"_id":4418,"_type":14,"title":4419,"_source":16,"_file":4420,"_stem":4421,"_extension":19},"/en-us/blog/google-gitlab-serverless-webinar",{"title":4407,"description":4408,"ogTitle":4407,"ogDescription":4408,"noIndex":6,"ogImage":4005,"ogUrl":4409,"ogSiteName":670,"ogType":671,"canonicalUrls":4409,"schema":4410},"Container apps on serverless: Write once, deploy anywhere","Containers, serverless, and microservices, oh my! 
Cut to the chase and learn how to write apps once and deploy anywhere with emerging technologies.","https://about.gitlab.com/blog/google-gitlab-serverless-webinar","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Write once, deploy anywhere: Containerized applications on modern serverless platforms\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tina Sturgis\"}],\n        \"datePublished\": \"2019-06-13\",\n      }",{"title":4412,"description":4408,"authors":4413,"heroImage":4005,"date":1096,"body":4414,"category":299,"tags":4415},"Write once, deploy anywhere: Containerized applications on modern serverless platforms",[3326],"\n\nUsing containers has become standard practice in app development today. We all get the value of why you want to build with containers. But as a developer, why should you care about [serverless](/topics/serverless/)? It’s simple, you can eliminate worry about the infrastructure that your app is going to run on and focus on the impact of the app itself. Specifically the business logic of how the app will interact with things like the end users and/or operating systems.\n\nThe concepts of serverless quickly move the conversation towards one around a microservices architecture. As we move away from building applications in a monolith, moving towards serverless and eliminating the need to worry about that infrastructure begin to make a lot more sense.\n\nSo now, how do we take these concepts that we hear and/or read about that increase velocity, flexibility, and scalability, and put them into action for your own application development?\n\nFind out at our webinar, \"Running containerized applications on modern serverless platforms\" on Jun. 25, 2019 with GitLab and Google experts. 
We'll take a deep dive into how new and emerging technologies like Kubernetes, Knative, Cloud Run, and GitLab Serverless can provide great stability and scalability while lowering costs and increasing the pace of innovation.\n\n[Reserve your spot.](https://webinars.devops.com/running-containerized-applications-on-modern-serverless-platforms)\n{: .alert .alert-gitlab-purple .text-center}\n",[3232,9,231,109,1477],{"slug":4417,"featured":6,"template":686},"google-gitlab-serverless-webinar","content:en-us:blog:google-gitlab-serverless-webinar.yml","Google Gitlab Serverless Webinar","en-us/blog/google-gitlab-serverless-webinar.yml","en-us/blog/google-gitlab-serverless-webinar",{"_path":4423,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4424,"content":4430,"config":4435,"_id":4437,"_type":14,"title":4438,"_source":16,"_file":4439,"_stem":4440,"_extension":19},"/en-us/blog/google-next-post",{"title":4425,"description":4426,"ogTitle":4425,"ogDescription":4426,"noIndex":6,"ogImage":4427,"ogUrl":4428,"ogSiteName":670,"ogType":671,"canonicalUrls":4428,"schema":4429},"What to check out at Google Cloud Next 2019","Support women who code by stopping by our booth, learn from a host of GitLab experts, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679235/Blog/Hero%20Images/cloud-native-predictions-2019.jpg","https://about.gitlab.com/blog/google-next-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What to check out at Google Cloud Next 2019\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mayank Tahilramani\"}],\n        \"datePublished\": \"2019-04-04\",\n      }",{"title":4425,"description":4426,"authors":4431,"heroImage":4427,"date":4432,"body":4433,"category":299,"tags":4434},[4010],"2019-04-04","\n\nIt’s that time of the year to indulge in all things innovative and new at Google Cloud Next 2019.\nAs an attendee last year, I was excited to learn 
about Google’s vision on ‘bringing the cloud to you’\nwith a focus on hybrid cloud and unveiling of GKE On-Prem. GitLab’s partnership with Google\nhas grown a lot since we launched our quick and easy [integration with GKE](/partners/technology-partners/google-cloud-platform/)\nlast year and we hope you will come out to see some of the new things we have going on.\n\n### Don't be shy, come say hi 👋\n\nCome visit us at our booth (#S1607), get scanned, and GitLab will donate $5 to your\ncharity of choice: [Rail Girls](http://railsgirls.com/) or [Django Girls](https://djangogirls.org/).\nThis also enters you for a chance to win an iPad Pro!\n\nWhile you're there, we would love to showcase and talk about:\n\n* GitLab’s [AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/) functionality.\n* Using GitLab to [secure your applications](/stages-devops-lifecycle/secure/).\n* How to get started with [GitLab for GCP on GKE](/partners/technology-partners/google-cloud-platform/) and GKE On-Prem.\n* GitLab [Serverless with Knative](/topics/serverless/) and [Cloud Run](https://cloud.google.com/blog/products/serverless/announcing-cloud-run-the-newest-member-of-our-serverless-compute-stack),\n* ... and much more!\n\n### Sit back, relax, and listen to some of our experts live\n\n* Check out [Brandon Jung](/company/team/#brandoncjung) (VP of Alliances) discuss [GitLab’s move from Azure to GCP](https://cloud.withgoogle.com/next/sf/sessions?session=ARC207) which includes a technical\noverview of the migration as well as lessons learned. Check out our customer case study [here](https://cloud.google.com/customers/gitlab/).\n\n* Come listen to [Kathy Wang](/company/team/#wangkathy) (Senior Director of Security) tell our journey [Towards Zero Trust at GitLab.com](https://cloud.withgoogle.com/next/sf/sessions?session=SEC220) along with key lessons learned. 
([You can read more about the evolution of Zero Trust here](/blog/evolution-of-zero-trust/).)\n\n* Learn something new with [Daniel Gruesso](/company/team/#danielgruesso) (Product Manager) showcasing GitLab’s serverless functionality to [Run a consistent serverless platform anywhere with Kubernetes and Knative](https://cloud.withgoogle.com/next/sf/sessions?session=HYB218).\n\n### Get hands on with Qwiklabs\n\nLearn from [Dan Gordon](/company/team/#dbgordon) (Senior Technical Marketing Manager) at our [Spotlight Lab: Introduction to GitLab on GKE](https://cloud.withgoogle.com/next/sf/sessions?session=301353-133371). Here you will have the chance to deploy GitLab on GKE, migrate a GitHub repository into a GitLab Project, and set up a CI/CD pipeline with AutoDevOps to deploy your code to GKE.\n\nSo stop by and say hello!\n\nWe are proud to be a sponsor at this event and would love to see as many of you at our booth (S1607) to discuss GitLab [Serverless](/topics/serverless/) with Knative and Cloud Run, GitLab’s integration with GKE, GitLab AutoDevOps for CI/CD, Security functionalities, as well as GitLab’s support for GKE On-Prem.\n",[855,1477,109,9,231,682,875,1158],{"slug":4436,"featured":6,"template":686},"google-next-post","content:en-us:blog:google-next-post.yml","Google Next Post","en-us/blog/google-next-post.yml","en-us/blog/google-next-post",{"_path":4442,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4443,"content":4449,"config":4455,"_id":4457,"_type":14,"title":4458,"_source":16,"_file":4459,"_stem":4460,"_extension":19},"/en-us/blog/graphql-vulnerability-api",{"title":4444,"description":4445,"ogTitle":4444,"ogDescription":4445,"noIndex":6,"ogImage":4446,"ogUrl":4447,"ogSiteName":670,"ogType":671,"canonicalUrls":4447,"schema":4448},"Using the GitLab GraphQL API for vulnerability reporting","Follow along as we teach you how to use GitLab GraphQL API to manage vulnerabilities 
programatically.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682219/Blog/Hero%20Images/jeremy-bishop-FzrlPh20l7Q-unsplash.jpg","https://about.gitlab.com/blog/graphql-vulnerability-api","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using the GitLab GraphQL API for vulnerability reporting\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2022-02-02\",\n      }",{"title":4444,"description":4445,"authors":4450,"heroImage":4446,"date":4452,"body":4453,"category":875,"tags":4454},[4451],"Fernando Diaz","2022-02-02","\n\nAs part of GitLab Ultimate, you have access to the Vulnerability Report,\nwhich provides information about vulnerabilities from scans of the default\nbranch. It is available for projects, groups, and the Security Center.\nFrom the Vulnerability Report you can:\n\n- filter the list of vulnerabilities\n- view more details about a vulnerability\n- view vulnerable source location (if available)\n- view an issue raised for a vulnerability\n- change the status of vulnerabilities\n- export details of vulnerabilities\n\nYou also get to perform functions (create/read/update/delete) on vulnerabilities using the GitLab GraphQL API.\n\nIn this blog post, I'll go over some of the GitLab GraphQL API and show how\nvulnerabilities can be managed with the API. Then I'll go over how to create a\ncustom page where a user can report a vulnerability.\n\n## GitLab GraphQL API\n\nGraphQL is a query language for APIs that allows clients to request exactly\nthe data they need, making it possible to get all required data in a limited\nnumber of requests.\n\nWith the GitLab GraphQL API, you can perform many different functions on\nvulnerabilities which can be seen in the Vulnerability Reports. You can\nperform queries for data retrieval or mutations for creating, updating,\nand deleting data. 
\n\nThere are many other functions that can be performed on vulnerabilities using the\nGraphQL API, such as querying for vulnerability data, changing a vulnerability's\nstatus, and much more. You can see the rest of the GraphQL API functions by viewing\nthe graphql [reference page](https://docs.gitlab.com/ee/api/graphql/reference/).\n\n## Running a GraphQL query to create a vulnerability\n\nYou can run GraphQL queries in a curl request on the command line on your local\ncomputer. A GraphQL request can be made as a POST request to `/api/graphql` with\nthe query as the payload. You can authorize your request by generating a\npersonal access token to use as a bearer token.\n\nWe will be using [Mutation.vulnerabilityCreate](https://docs.gitlab.com/ee/api/graphql/reference/#mutationvulnerabilitycreate)\nin order to create a vulnerability.\n\n**1.** Create a new project or use an existing project.\n\n**2.** Create a [Personal Access Token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token).\n\n**Note:** Make sure it is `api` scoped.  
\n\n**3.** Set the Personal Access Token in the environment variables.\n\n    ```\n    $ export ACCESS_TOKEN=\u003Cyour-personal-access-token>\n    ```\n\n**4.** Get your Project ID to use in the curl request.\n\n**Note:** Project ID can be found in your project page  \n\n![](https://about.gitlab.com/images/blogimages/2022-graphql-vuln-api/show_projectid.png)  \n\n**5.** Send a curl request to graphql api.\n\n    ```\n    $ curl -g --header \"Authorization: Bearer $ACCESS_TOKEN\" --header \"Content-Type: application/json\" --request POST --data '{\"query\": \"mutation { vulnerabilityCreate(input: {clientMutationId: \\\"Ferns-Vuln-Reporter-Xtreme\\\", name: \\\"YEETTT\\\", project: \\\"gid://gitlab/Project/30857578\\\", description: \\\"ax\\\", scanner: {name: \\\"dude-scanner2\\\", id: \\\"123456\\\", url: \\\"localhost\\\", version: \\\"1.0\\\"}, identifiers: [{name: \\\"dont worry about its ok\\\", url: \\\"localhost\\\"}]}) { clientMutationId \\n vulnerability {  id  } \\n errors } }\" }' https://gitlab.com/api/graphql\n\n    {\"data\":{\"vulnerabilityCreate\":{\"clientMutationId\":\"Ferns-Vuln-Reporter-Xtreme\",\"vulnerability\":{\"id\":\"gid://gitlab/Vulnerability/29086674\"},\"errors\":[]}}}\n    ```\n\nYou can see that the resonse will provide some data. Let's save the provided vulnerability\nid, 29086674.  \n\n**Note:** You can see where I used the Project ID in the query above,\nby searching for \"30857578\". Also feel free to customize the strings in\nthe request.  
\n\n**6.** Go to your project and click on the `Security & Compliance > Vulnerability Report`.\n\n**7.** Replace `vulnerability_report` in the url with `/vulnerabilities/29086674`, and you should\nsee detailed information on the vulnerability you submitted.\n\n![](https://about.gitlab.com/images/blogimages/2022-graphql-vuln-api/vuln_saved.png)  \n\n## Creating a Vulnerability Report site\n\nNow let's put what we learned about the Vulnerability API into creating an application\nwe can use for others to report vulnerabilities.\n\nI created a basic application that uses the GraphQL API to create vulnerabilities for\na given project. It's a little GoLang web-application that deploys to Kubernetes and\ncontains a basic web-form.\n\n**Note:** To continue with this section, you need a Kubernetes Cluster, GitLab Account, and\nknowledge of the GitLab [Kubernetes-Agent](https://docs.gitlab.com/ee/user/clusters/agent/).\n\n**1.** Create a [Personal Access Token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token).\n\n**Note:** Make sure it is `api` scoped.  
\n\n**2.** Create a new project and select import.\n\n![](https://about.gitlab.com/images/blogimages/2022-graphql-vuln-api/import_project.png)  \n\n**3.** Import the [Vuln-Reporter](https://gitlab.com/tech-marketing/devsecops/vuln-reporter).\n\n![](https://about.gitlab.com/images/blogimages/2022-graphql-vuln-api/repo_url.png)  \n\n**4.** Connect to a Kubernetes Cluster using the [Kubernetes-Agent](https://docs.gitlab.com/ee/user/clusters/agent/install/index.html).\n\n**5.** Add the [Ingress Controller](https://docs.gitlab.com/ee/user/infrastructure/clusters/manage/management_project_applications/ingress.html) as a [Cluster Management Application](https://docs.gitlab.com/ee/user/clusters/management_project_template.html).\n\n**Note:** Once the Kubernetes Agent is installed, this can be done by simply adding\nthe `applications` folder, `helmfile.yaml`, and `apply` job present in this [Infrastrucuture project](https://gitlab.com/tech-marketing/devsecops/initech/infrastructure).\n\n**6.** Add the following variables under `Settings > CICD > Variables`:\n\n    - PROJECT_ID: The id of the project you want to report on.\n    - ACCESS_TOKEN: Your personal access token created earlier.\n\n**7.** Run the pipeline.\n\n**8.** Connect to Kubernetes Cluster and find the Load Balancer IP.\n\n    ```\n    $ kubectl get svc -n gitlab-managed-apps | grep ingress\n\n    ingress-ingress-nginx-controller             LoadBalancer   10.28.13.2    104.198.204.142   80:31853/TCP,443:31835/TCP   19d\n    ingress-ingress-nginx-controller-admission   ClusterIP      10.28.6.20    \u003Cnone>            443/TCP                      19d\n    ```\n\n**Note:** It's the `104.198.204.142` address, but it may be different for you. Just make sure it's\nan external address.  
\n\n**9.** Go to `http://\u003CLoad-Balancer-IP>/reporter` in your browser.\n\n**10.** Add info and submit a Vulnerability.\n\n![](https://about.gitlab.com/images/blogimages/2022-graphql-vuln-api/submit_vuln.png)  \n\nAfter submitting you should get a link. Copy that link\ninto your browser.\n\n\n**11.** View the Vulnerability Report.\n\n![](https://about.gitlab.com/images/blogimages/2022-graphql-vuln-api/vuln_report.png)\n\n",[9,875,1158],{"slug":4456,"featured":6,"template":686},"graphql-vulnerability-api","content:en-us:blog:graphql-vulnerability-api.yml","Graphql Vulnerability Api","en-us/blog/graphql-vulnerability-api.yml","en-us/blog/graphql-vulnerability-api",{"_path":4462,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4463,"content":4468,"config":4473,"_id":4475,"_type":14,"title":4476,"_source":16,"_file":4477,"_stem":4478,"_extension":19},"/en-us/blog/guide-to-ci-cd-pipelines",{"title":4464,"description":4465,"ogTitle":4464,"ogDescription":4465,"noIndex":6,"ogImage":928,"ogUrl":4466,"ogSiteName":670,"ogType":671,"canonicalUrls":4466,"schema":4467},"A quick guide to GitLab CI/CD pipelines","How GitLab is making a better pipeline with Auto DevOps.","https://about.gitlab.com/blog/guide-to-ci-cd-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A quick guide to GitLab CI/CD pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-07-12\",\n      }",{"title":4464,"description":4465,"authors":4469,"heroImage":928,"date":4470,"body":4471,"category":679,"tags":4472},[788],"2019-07-12","\nTo be successful with [DevOps](https://about.gitlab.com/topics/devops/), teams must use [automation](https://docs.gitlab.com/ee/topics/autodevops/), and [CI/CD pipelines](https://about.gitlab.com/topics/ci-cd/) are a big part of that journey. At its most basic level, a pipeline gets code from point A to point B. 
The quicker and more efficient the pipeline is, the better it will accomplish this task.\n## What is a CICD pipeline?\n\nA pipeline is the lead component of continuous integration, delivery, and deployment. It drives software development through building, testing and deploying code in stages. Pipelines are comprised of jobs, which define what will be done, such as compiling or testing code, as well as stages that spell out when to run the jobs. An example would be running tests after stages that compile the code.\n\nA CI/CD pipeline automates steps in the SDLC such as builds, tests, and deployments. When a team takes advantage of automated pipelines, they simplify the handoff process and decrease the chance of human error, creating faster iterations and better quality code. Everyone can see where code is in the process and identify problems long before they make it to production.\n\nBefore we dive in, let's cover some basics:\n\n## The GitLab pipeline glossary\n\n**Commit**: A code change.\n\n**Job**: Instructions that a runner has to execute.\n\n**Pipeline**: A collection of jobs split into different stages.\n\n**Runner**: An agent or server that executes each job individually that can spin up or down as needed.\n\n**Stages**: A keyword that defines certain stages of a job, such as `build` and `deploy`. Jobs of the same stage are executed in parallel.\nPipelines are configured using a version-controlled YAML file, `.gitlab-ci.yml`, within the root of a project. From there, you can set up parameters of your pipeline:\n\n*   What to execute using [GitLab Runner](https://docs.gitlab.com/ee/ci/runners/#configuring-gitlab-runners)\n*   What happens when a process succeeds or fails\n\nNot all jobs are so simple. 
For larger products that require cross-project interdependencies, such as those adopting a [microservices architecture](/blog/strategies-microservices-architecture/), there are [multi-project pipelines](/blog/use-multiproject-pipelines-with-gitlab-cicd/).\n\n![multi-project pipelines](https://about.gitlab.com/images/topics/multi-project_pipelines.png){: .shadow.medium.center }\n\nIn GitLab 9.3 we made it possible to display links for upstream and downstream projects directly on the pipeline graph, so developers can check the overall status of the entire chain in a single view. Pipelines continue to evolve, and in our [CI/CD product vision](https://about.gitlab.com/direction/ops/) we’re looking into making pipelines even more cohesive by implementing [Multiple Pipelines in a single `.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab-ce/issues/22972) in the future.\n\n## Pipeline as code\n\nDefining deployment pipelines through source code such as Git, is known as pipeline as a code. The pipeline as code practice is part of a larger “as code” movement that includes infrastructure as code. Teams can configure builds, tests, and deployment in code that is trackable and stored in a centralized source repository. They can use a declarative YAML approach or a vendor-specific programming language, such as Jenkins and Groovy, but the premise remains the same.\n\nA pipeline as code file specifies the stages, jobs, and actions for a pipeline to perform. Because the file is versioned, changes in pipeline code can be tested in branches with the corresponding application release.\n\nThe pipeline as code model of creating continuous integration pipelines is an industry best practice. There are multiple benefits, such as the ability to store CI pipelines and application code in the same repository. 
Developers can also make changes without additional permissions, working with tools they’re already using.\n\nOther benefits are more efficient collaboration and the ability to keep information accessible so team members can act on their decisions. Pipeline changes are subject to a code review process, avoiding any break in the pipeline migration.\n\nDeployment pipelines are in a version control system independent of continuous integration tools. Pipelines can be restored if the continuous integration system goes down. If a team wants to switch CI tools at another point, pipelines can be moved into a new system.\n\nIn the early iterations of [CI/CD](/topics/ci-cd/), DevOps tools set up pipelines as point-and-click or through a GUI. This originally presented a number of challenges:\n\n*   Auditing was limited to what was already built in\n*   Unable to collaborate\n*   Difficulty troubleshooting\n\nSomething as simple as rolling back to the last known config was an exercise in futility. CI/CD pipelines during this time were prone to breaking, lacked visibility, and were difficult to change.\n\nThe pipeline as code model corrected a lot of these pain points and offered the flexibility teams needed to execute efficiently. With source code, teams could use Git to search and introspect changes.\n\nToday, many tools have adopted YAML configuration as a best practice. GitLab CI/CD has used code, rather than GUI, since the beginning for pipeline configuration. 
\"Pipeline as code\" comes with many of the same benefits the other \"as code\" trends have:\n\n*   **Version control** – keep track of changes over time and revert to previous configurations easily\n*   **Audit trails** – know when and what changes were made to the source code\n*   **Ease of collaboration** – code is available to the team for improvements, suggestions, and updates\n*   **Knowledge sharing** – import templates and code snippets so teams can share best practices\n*   **Built-in Lint tool** – ensures YAML file is valid and assists new users\n\nThe principles of software development apply not only to the applications we deliver but also to _how_ we build them. The pipeline as code model creates automated processes that help developers build applications better and faster. Having everything documented in a source repository allows for greater visibility and collaboration so that everyone can continually improve processes, which is what DevOps is all about.\n\n## What are the different stages of a GitLab CI/CD pipeline?\n\nPipelines are comprised of jobs, which define _what_ to do, such as compiling or testing code; stages, which define _when_ to run the jobs; and runners, which are agents or servers that execute each job, and can spin up or down as needed.\n\nPipelines are generally executed automatically and don’t need any intervention once they are created. \n\nA typical pipeline generally consists of a few stages in the following order:\n\n### Test\nThe test stage is where the code is assess to ensure there are no bugs and it is working the way it was designed to before it reaches end users. The test stage has a job called deploy-to stage. Unit testing on small, discrete functions of the source may also done. All unit tests running against a code base are required to pass. 
If they don’t that creates a risk that must be addressed right away.\n\n### Deploy\nThe staging stage has a job called deploy-to-stage, where a team can conduct further tests and validation. It is followed by a production stage with a job called deploy-to-production. If the code passes a series of automated tests, often the build will automatically deploy. [The endpoint is typically pre-production deployment](https://www.techtarget.com/searchsoftwarequality/CI-CD-pipelines-explained-Everything-you-need-to-know). Once the build’s integrity is completely validated by stakeholders, it can be deployed to an actual production environment. Once the build passes pre-deployment testing, in a continuous deployment pipeline, it is automatically deployed to production.Then, it is monitored. To do so effectively requires collecting and [analyzing metrics](https://about.gitlab.com/topics/ci-cd/continuous-integration-metrics/) such as deployment frequency, deployment time and lead time for changes.\n\n## How do I set up a GitLab CI/CD pipeline?\nPipeline templates are useful because writing them from scratch is a time-consuming and onerous process. GitLab has pipeline templates for more than 30 popular programming languages and frameworks. Templates to help you get started can be found in our [CI template repository](https://gitlab.com/gitlab-org/gitlab/tree/master/lib/gitlab/ci/templates).\n\nA GitLab pipeline executes several jobs, stage by stage, with the help of automated code.\n\nA continuous integration pipeline involves building something from the scratch and testing the same in a development environment. It might occur to the developers to add something after building the application and pushing it into production. 
This can be done with the help of continuous integration where we can add the code even after it is deployed.\n\nThis phase includes testing as well where we can test with different approaches in the code.\n\n### CD Pipeline prerequisites \nTo get started, you need to set up an [Ubuntu 18.04 server](https://www.digitalocean.com/community/tutorials/initial-server-setup-with-ubuntu-18-04) along with a sudo non-root user and firewall. You also need at least 1 GB RAM and 1 CPU.\n\n[Docker](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-18-04) must be installed on the server.\nA user account on a GitLab instance with an enabled container registry. The free plan of the [official GitLab instance](https://gitlab.com/) meets the requirements. You can also host your own GitLab instance by following the [How To Install and Configure GitLab on Ubuntu 18.04 guide](https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-gitlab-on-ubuntu-18-04).\nThen you should create a GitLab project, adding an HTML file to it. Later, you’ll copy the HTML file into an Nginx Docker image, which in turn, you will deploy to the server.\n\n1. Log in to your GitLab instance and click new project.\n2. Give it a proper Project name.\n3. Optionally add a Project description.\n4. Make sure to set the Visibility Level to Private or Public depending on your requirements.\n5. Finally click Create project\n\n## Building better pipelines with Auto DevOps\n\nCI/CD pipelines have automated so much of the development process, however, it will still take time to do the initial work of building and configuring them in your environment. But what if you aren’t sure what all the parts of your CI/CD pipeline should be? 
What are the best practices you should know at every stage?\n\nIn the past, there have only been two choices: Time-consuming configuration from scratch with complete customization, or an easier auto-configuration with much less flexibility. Developers have longed for the moment where they could click a button and have a complete pipeline with code quality, language detection, and all scripts included with very little manual work.\n\n[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) is our solution to this problem. It is a pre-built, fully-featured CI/CD pipeline that automates the entire delivery process. Instead of having to choose between time and flexibility, GitLab offers both. In addition to the Auto DevOps template, GitLab offers several CI templates that can be modified as necessary, or you can override specific settings. Want all the power of Auto DevOps for a custom test job? Just override the `script` block for the `test` job and give it a try. Since templates are also modular, teams have the option to pull in only the parts they need.\n\nWe hope this blog post gives you some insight into how we approach pipeline as code and our larger vision for how we’re improving the CI/CD pipeline experience in the future. 
Automated pipelines increase development speed and improve code quality, and we’re actively working on making them even better and easier to use.\n\nCover image by [Gerrie van der Walt](https://unsplash.com/photos/m3TYLFI_mDo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/pipes?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,683,109],{"slug":4474,"featured":91,"template":686},"guide-to-ci-cd-pipelines","content:en-us:blog:guide-to-ci-cd-pipelines.yml","Guide To Ci Cd Pipelines","en-us/blog/guide-to-ci-cd-pipelines.yml","en-us/blog/guide-to-ci-cd-pipelines",{"_path":4480,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4481,"content":4487,"config":4492,"_id":4494,"_type":14,"title":4495,"_source":16,"_file":4496,"_stem":4497,"_extension":19},"/en-us/blog/guide-to-rest-api",{"title":4482,"description":4483,"ogTitle":4482,"ogDescription":4483,"noIndex":6,"ogImage":4484,"ogUrl":4485,"ogSiteName":670,"ogType":671,"canonicalUrls":4485,"schema":4486},"Guide to REST API","Learn what REST API is, how it works, and what its benefit is in software development. 
Also find out the underlying principles of this important technology.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098516/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_2N8JxZDeeDLlzrsJ4boteB_1750098516673.png","https://about.gitlab.com/blog/guide-to-rest-api","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Guide to REST API\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2024-10-18\",\n      }",{"title":4482,"description":4483,"authors":4488,"heroImage":4484,"date":4489,"body":4490,"category":791,"tags":4491},[745],"2024-10-18","Whether it's developing an online booking app, a mobile payment solution, or a messaging service, chances are your team is using a REST API. In this article, you'll learn what a REST API is and how it works, as well as its benefits and uses.\n\n## What is a REST API?\n\nREST API, RESTful API, or RESTful web API: These names designate APIs that adhere to a particular standard, which is the REST architecture. Before going any further, remember that an API, or application programming interface, is software that allows two applications to communicate with each other. In computing, APIs are essential to allow various applications to work together.\n\nTo create an API, developers follow strictly defined methods and principles, so that the whole can work. Before the 2000s, developers used [SOAP](https://www.techtarget.com/searchapparchitecture/definition/SOAP-Simple-Object-Access-Protocol) (Simple Object Access Protocol), a protocol built on XML (Extensible Markup Language), which was complex to coordinate and resource-intensive. 
While SOAP is still used today, it has been largely replaced by REST API.\n\nDesigned in 2000 by American computer scientist Roy Fielding during his doctoral thesis, REST (REpresentational State Transfer) has become the dominant model for creating APIs, and an essential milestone in the development of the World Wide Web. Today, the vast majority of APIs are based on REST, particularly to offer web, interactive, or mobile services. Let's find out how RESTful APIs work, their advantages, and their wide-ranging applications.\n\n## How does a REST API work?\n\nIn practice, the REST API works on the principle of the client-server environment. The RESTful API retrieves and transmits a user's or application's requests on one end and the information rendered by the server (application or database) on the other end.\n\nSome key concepts make it possible to understand how a RESTful API works. The client is the entity making a request. This is the case, for example, of a user searching within a product catalog on their browser. The API is responsible for communicating the request to the server, and returning the requested information to the client. The information that passes through the API is the resources. The server processes requests. In this case, it will return the list of products matching the search criteria.\n\nThe client's requests are made through the HTTP (Hypertext Transfer Protocol) protocol. Here are the main methods and tasks it enables you to accomplish:\n- GET: retrieve data sent by the server.\n- POST: send and publish information to the server (registration form data, for example).\n- PUT: update the server information.\n- PATCH: partially modify an existing resource.\n- DELETE: delete information from the server.\n\nThere are various data formats for using a REST API. The JSON (JavaScript Object Notation) format is a lightweight format, which is easy to understand and usable by many programming languages. 
XML makes it possible to manage complex data structures and is compatible with other standards such as RSS. YAML and HTML are other formats often used to communicate resources.\n\n## What are the principles of the REST API?\n\nA REST API follows the REST principles regarding software architecture. These principles create a guideline for creating flexible and lightweight APIs, which are perfectly adapted to data transmission over the internet.\n\nHere are the six architectural principles that govern a REST interface:\n- Client-server decoupling. The client only knows the URI (Uniform Resource Identifier) of the resource to be retrieved. The server interacts only by transmitting its data via HTTP.\n- Uniform interface. The REST architecture standardizes how information is identified, managed, and transmitted, and uses hyperlinks to bring additional resources to the client.\nCode on demand. The server can transmit code to the client to expand its functionality, such as to help identify errors in a form.\n- Layered system. A RESTful API can run on several servers organized hierarchically, to provide a more stable and efficient service to the client.\n- Cacheable. The REST server can cache data to better serve the client, for example by storing the images of a site to then serve them again.\n- Stateless. Each client request is stand-alone and processed independently by the server. 
Therefore, each request must contain all the elements necessary for its processing.\n\n## What are the benefits of a REST API?\n\nBy following the REST API framework requirements, developers make use of the many advantages of the RESTful API to develop effective and powerful applications:\n- Versatility: There are no restrictions on which programming language to use, and there is a wide selection of data formats (XML, PYTHON, JSON, HTML, etc.).\n- Lightweight: The lightweight data formats of a REST API make it ideal for mobile applications or the Internet of Things (IoT).\n- Portability: Client-server separation enables the exchange of data between platforms.\nFlexibility: This API does not have the complexities of a protocol since it is an architectural style.\n- Independence: Developers can work separately on the client or server part.\n\nThe benefits of the REST API translate into increased productivity and scalability for development teams. Scaling systems using REST API is easier. The features are therefore better able to support a large load of users and operations.\n\n## Security constraints\n\nCreating and managing a RESTful web API is not without challenges. User authentication can become complex when it uses several different methods, by HTTP, API keys, or OAuth (Open Authorization). On large and complex applications, the multiplication of endpoints between the server and the client can impair overall consistency, as can updates if they leave old touchpoints still active.\n\nAdditionally, the REST interface has a weakness because it transmits potentially sensitive data, such as identifiers, through the endpoint URL. Securing it requires specific measures such as Transport Layer Security (TLS) encryption, a robust user authentication model, and a system for managing malicious requests and limiting throughput.\n\n## Uses of a REST API\n\nDevelopers use APIs with the REST architecture to create and maintain many services. 
Therefore, most web and mobile applications use REST APIs to access and share resources and information. In the cloud, this API makes it possible to connect the services of distributed and hybrid architectures quickly. Within large companies, it enables interoperability between information system components.\n\nRefreshing an e-commerce site's prices, automating publications, orchestrating Kubernetes clusters, etc. The RESTful APIs' scope of use is limited only by the imagination of digital application developers and creators.\n\n## The GitLab REST API\n\nGitLab offers a comprehensive suite of tools and APIs for integrating and automating external applications. It includes GraphQL, webhooks, IDE extensions, and of course, a REST API. The GitLab REST API can be authenticated in many ways, such as by access token, OAuth, or session cookies. Endpoints are available for Dockerfile, .gitignore, GitLab CI/CD YAML, and open source templates. To take full advantage of all the possibilities for developing your agile and cloud-native applications, see the complete [GitLab REST API documentation](https://docs.gitlab.com/ee/api/rest/index.html).\n\n## REST API FAQs\n\n### REST vs. SOAP\n\nREST and SOAP are two API standards. REST (REpresentational State Transfer) API uses the REST architectural principles, which allow a server and a client to communicate in a lightweight and scalable way. The REST API is the most common type of API. The SOAP (Simple Object Access Protocol) protocol is older, more rigid, and only available in XML format. This old standard can still be used for applications that require a high level of security.\n\n### What is the difference between REST and REST API?\n\nREST is a style of software architecture intended to facilitate the creation of web services and the exchange of data over the internet, by ensuring interoperability between computers and servers. 
The RESTful web API is a type of API that is based on the main principles of REST.\n\n### What are the principles of a REST API?\n\nA REST API follows the six main principles of the REST architecture. These principles are uniform interface, code on demand, layered system, cacheable, stateless, and client-server decoupling. The latter principle forms the basis of the structure of a RESTful API; it is essential to the success of this API in the world of web applications.\n\n## Learn more\n- [GitLab Rest API documentation](https://docs.gitlab.com/ee/api/rest/)\n- [Extend with GitLab](https://docs.gitlab.com/ee/api/)",[231,9],{"slug":4493,"featured":6,"template":686},"guide-to-rest-api","content:en-us:blog:guide-to-rest-api.yml","Guide To Rest Api","en-us/blog/guide-to-rest-api.yml","en-us/blog/guide-to-rest-api",{"_path":4499,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4500,"content":4505,"config":4510,"_id":4512,"_type":14,"title":4513,"_source":16,"_file":4514,"_stem":4515,"_extension":19},"/en-us/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain",{"title":4501,"description":4502,"ogTitle":4501,"ogDescription":4502,"noIndex":6,"ogImage":928,"ogUrl":4503,"ogSiteName":670,"ogType":671,"canonicalUrls":4503,"schema":4504},"Have DevOps jobs to fill? Try these 3 strategies to hire and retain","So many DevOps jobs posted, so few options to fill them. Here's why hiring and retaining developers is tricky, and how 3 thoughtful strategies, including a DevOps platform, can help.","https://about.gitlab.com/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Have DevOps jobs to fill? 
Try these 3 strategies to hire and retain\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-09-23\",\n      }",{"title":4501,"description":4502,"authors":4506,"heroImage":928,"date":4507,"body":4508,"category":769,"tags":4509},[851],"2021-09-23","\nIf every company is a software company, how do you stand out from the crowd when it comes to attracting developer talent and filling DevOps jobs?\n\nThere’s a well-known, and worldwide, shortage of software developers, especially those with expertise in DevOps. Worse still, demand for those roles is accelerating rapidly: The US Bureau of Labor Statistics predicts employment opportunities for devs and testers will [increase 22% between 2020 and 2030](https://www.bls.gov/ooh/computer-and-information-technology/software-developers.htm#tab-6). That growth rate means nearly 190,000 net new developer/QA/test jobs will be opening each year, according to the BLS. \n\nThat’s all a long way of saying things are tough out there. Organizations looking to expand, or even just maintain, their DevOps jobs momentum have to find unique ways to stand out from the crowd because, as [many surveys have shown](https://hired.com/state-of-software-engineers#report), salary alone is often insufficient to both attract and retain developer talent.\n\n**Elevating your DevOps skills? Join us at [Commit at KubeCon - Oct. 11!](/events/commit/)**\n\nHere are 3 ways organizations can create an environment where DevOps can thrive, boosting developer retention, job satisfaction and even “cool place to work” street cred.\n\n## Make (a few) cool tools rule\n\nDevelopers are known for their big love of tools. In our [2021 Global DevSecOps Survey](/developer-survey/), more than one-quarter of respondents said they used between 5 and ten tool chains, and more than half said each tool chain had an average of 5 tools on it. 
Do the math and it’s clear that’s a lot of tools, and according to [research on software developer job satisfaction](https://link.springer.com/chapter/10.1007/978-1-4842-4221-6_10) too much information (i.e., from **too many tools**) can lead to less productivity and unhappy developers.\n\nThe solution to this very common problem can be found by adopting a DevOps platform, a single application where every stage of DevOps is interconnected, visible and seamless. And make sure that platform can integrate with all the key, cutting edge, “must have” kinds of tools that developers like to put on their resumes, and everyone will benefit from this streamlined approach.\n\n## Pay attention to career education\n\nDevelopers are always willing to DIY career education. The latest Stack Overflow Survey found about 60% of their survey takers [taught themselves coding via an online source](https://insights.stackoverflow.com/survey/2021#developer-profile-experience) – but that doesn’t mean they wouldn’t value (and take advantage of) training opportunities from employers. In our 2021 survey, a majority of developers said they’re most excited to learn about AI/ML, while ops pros were looking for education around advanced programming languages. \n\nBy asking DevOps team members about their interests and needs, organizations can keep a pulse on training opportunities they could offer that will actually matter to their teams and potentially make filling DevOps jobs easier.\n\n## Be flexible about everything\n\nFrom working remotely to working part-time, it’s clear that developers want the option to mix it up if possible. The more options - like having the time to pursue a degree or a passion - given to DevOps team members, the more likely they are to be satisfied with their jobs. \n\nAlso, time to pursue some “off the books” projects is another smart company perk. 
Don’t forget the role open source projects played in the pandemic (here are [a few examples](https://www.newamerica.org/digital-impact-governance-initiative/reports/building-and-reusing-open-source-tools-government/open-source-project-hubs-for-covid-19/)), making an already important part of a developer’s role even more compelling. In fact, more than 69% of our survey respondents told us they were involved with at least one open source project in 2021, and that number was up 6% from 2020.  \n\n## Don't forget DevOps\n\nIt’s a temperamental DevOps job market, certainly, but organizations with healthy DevOps practices do have one secret weapon: DevOps itself. When we asked our 4,300+ survey takers what the top benefits of DevOps was, “happier developers” was near the top of the list. \n\n## Read more on DevOps careers: \t\t\n\n- [Best advice for your DevOps career? Keep on learning](/blog/best-advice-for-your-devops-career-keep-on-learning/)\n\n- [6 tips to make software developer hiring easier](/blog/6-tips-to-make-software-developer-hiring-easier/)\n\n- [Four tips to increase your DevOps salary](/blog/four-tips-to-increase-your-devops-salary/)\n\n- [DevOps salaries in 2021: Where do you rank?](/blog/a-look-at-devops-salaries/)\n\n",[9,813,2535],{"slug":4511,"featured":6,"template":686},"have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain","content:en-us:blog:have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain.yml","Have Devops Jobs To Fill Try These 3 Strategies To Hire And 
Retain","en-us/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain.yml","en-us/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain",{"_path":4517,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4518,"content":4524,"config":4529,"_id":4531,"_type":14,"title":4532,"_source":16,"_file":4533,"_stem":4534,"_extension":19},"/en-us/blog/heres-how-to-get-integrated-secure-coding-advice-in-gitlab",{"title":4519,"description":4520,"ogTitle":4519,"ogDescription":4520,"noIndex":6,"ogImage":4521,"ogUrl":4522,"ogSiteName":670,"ogType":671,"canonicalUrls":4522,"schema":4523},"How to get integrated secure coding advice in GitLab","Secure Code Warrior now offers integrated security training and guidance within the GitLab DevOps Platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662877/Blog/Hero%20Images/security-cover-new.png","https://about.gitlab.com/blog/heres-how-to-get-integrated-secure-coding-advice-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get integrated secure coding advice in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-03-24\",\n      }",{"title":4519,"description":4520,"authors":4525,"heroImage":4521,"date":4526,"body":4527,"category":726,"tags":4528},[745],"2022-03-24","Busy developers want to write secure code and fix any issues. But they often lack the time and resources to get it done efficiently.\n\nTo resolve vulnerabilities faster, developers need actionable advice from trusted sources of secure coding right inside the tools they use every day. 
[Secure Code Warrior](https://www.securecodewarrior.com/) is proud to partner with GitLab to enable developers to ship safe code faster, utilizing actionable and highly relevant secure coding guidance that is accessible from within GitLab’s [DevOps Platform](/topics/devops-platform/). This integration was announced as part of [GitLab’s 14.9 release](/releases/2022/03/22/gitlab-14-9-released/#integrated-security-training).\n\n## Empower developers with actionable guidance integrated inside GitLab\n\nGitLab is enabling developer-led security by getting scan results into the hands of those who can make fixes fast. Secure Code Warrior further strengthens this vision by bringing to GitLab some of the world’s largest secure coding and remediation content (6500+ interactive coding challenges, 56+ languages:frameworks, 150+ vulnerability categories) that is used by hundreds of thousands of professional developers across many industries. With this integration, secure coding guidance that is highly relevant to the detected vulnerabilities is easily accessible to developers with the click of a link in GitLab.\n\n## How this integration delivers contextual secure coding training\n\nWhen GitLab’s vulnerability scanners detect code security issues in merge requests and/or pipeline scans, a security issue is created and the identified vulnerability descriptions or CWE IDs are added to the Vulnerability Details section. 
The integration uses the vulnerability information to get a link to learning resources that educate developers on finding and fixing that particular security problem.\n\n![Secure Code Warrior platform](https://about.gitlab.com/images/blogimages/scw1.png)\n\nFor example, if the vulnerability scanners detected a Cross-Site Request Forgery (CSRF) in the application code, the vulnerability detail would be updated with the relevant training link.\n\n## GitLab-Secure Code Warrior integration at a glance\n\nWhen users click on the link, they are taken to SCW’s platform as shown below.\n\n![Secure Code Warrior platform](https://about.gitlab.com/images/blogimages/scw2.png)\n\nBy completing an appropriate challenge they get the trusted guidance to resolve the CSRF vulnerability with confidence. This is also a highly effective way to retain the knowledge because:\n\n- Bite-sized coding challenges give developers targeted, hands-on skill building in that vulnerability, and how to resolve it\n- Contextual learning - presented in manageable chunks - continually reinforces good, secure coding patterns from a trusted source, not just enabling a patch\n- It reduces the time gap between learning and application of knowledge, ensuring lasting engagement and retention\n- Developers grow their muscle memory to recognize security issues while they code, eliminating common vulnerabilities from the start of software creation\n\n## Ship secure code faster with improved merge request rate\n\nAs more teams adopt this workflow path to resolve vulnerabilities faster, they will gradually improve their MR rate and release quality and create secure code at speed. 
By embedding secure coding training within developer workflows, this integration automates and scales remediation support to all development teams and lets AppSec focus on risk monitoring and strengthening the security posture of the organization.\n\nThe partnership between Secure Code Warrior and GitLab is just getting started; follow us as we enable developers to build and release secure software at speed. We’d love you to try it out, and your feedback can help shape the future of the product.\n\nGet more details on how to [enable this integration](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#enable-security-training-for-vulnerabilities).",[875,9,1158],{"slug":4530,"featured":6,"template":686},"heres-how-to-get-integrated-secure-coding-advice-in-gitlab","content:en-us:blog:heres-how-to-get-integrated-secure-coding-advice-in-gitlab.yml","Heres How To Get Integrated Secure Coding Advice In Gitlab","en-us/blog/heres-how-to-get-integrated-secure-coding-advice-in-gitlab.yml","en-us/blog/heres-how-to-get-integrated-secure-coding-advice-in-gitlab",{"_path":4536,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4537,"content":4543,"config":4548,"_id":4550,"_type":14,"title":4551,"_source":16,"_file":4552,"_stem":4553,"_extension":19},"/en-us/blog/hiring-in-the-deep-end-of-the-talent-pool",{"title":4538,"description":4539,"ogTitle":4538,"ogDescription":4539,"noIndex":6,"ogImage":4540,"ogUrl":4541,"ogSiteName":670,"ogType":671,"canonicalUrls":4541,"schema":4542},"DevOps hiring from the deep end of the talent pool","Time to broaden your search beyond four-year tech degrees to candidates with life experience, self-taught/bootcamp chops, and soft skills.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663743/Blog/Hero%20Images/three-things-i-learned-in-my-first-month-at-gitlab.jpg","https://about.gitlab.com/blog/hiring-in-the-deep-end-of-the-talent-pool","\n                        {\n        \"@context\": \"https://schema.org\",\n   
     \"@type\": \"Article\",\n        \"headline\": \"DevOps hiring from the deep end of the talent pool\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"PJ Metz\"}],\n        \"datePublished\": \"2022-02-01\",\n      }",{"title":4538,"description":4539,"authors":4544,"heroImage":4540,"date":4545,"body":4546,"category":813,"tags":4547},[831],"2022-02-01","\n\nWhen it comes to DevOps hiring, many companies have only waded into the shallow end of the pool to find talent. They’ve relied on a steady stream of university graduates rather than being a little more creative in their search and realizing that many DevOps professionals are created outside of traditional four-year institutions. So let’s explore the DevOps hiring opportunities that await in the deep end of the talent pool. \n\n## Non-traditional paths\n\nWhile four-year degrees are wonderful – the dream of a college education should be available to all who want it – the reality is there are a variety of barriers to attending college. For instance, many people don’t realize the opportunity within tech until later in their life, after they’ve already earned a different degree or have been working for several years in a different field. To get their DevOps skills, they pursue an alternative route such as bootcamps or teaching themselves. In fact, non collegiate paths are increasingly common; according to a report by Hired, 45% of software engineers have a computer science degree, but 24% are self-taught, and another 10% learned how to code through a bootcamp program.\n\n### DevOps bootcamps\n\nBootcamps offer rigorous and thorough training on a very specific set of tech-related skills such as full-stack web development, data science, and UX/UI design. These virtual or in-person programs are full-time or part-time, and there are many bootcamps available all over the world. 
Bootcamps are appealing because of their affordable cost and shorter time investment, compared to earning a four-year degree.\n\nOrganizations aiming to train their engineering teams like bootcamps because they can focus exclusively on skills needed for the job, modern frameworks and languages, and real-world applications, making their graduates well-versed in building today’s apps. \n\n### Self-taught DevOps\n\nDevelopers often learn to code while working in another career. Former educators, EMTs, and office managers everywhere are dedicating time outside of their professions to learning to write code and build apps.\n\nSelf-taught developers are an asset to a company because they have the discipline to learn tough concepts on their own and find ways to apply them. This is something that many other avenues of learning don’t offer. Because they had to find their own way, self-taught developers often have unique perspectives and the ability to problem-solve that are valuable to teams at all levels of tech. \n\nDevOps is a unique career in that proving what you know is incredibly important. So when it's time for DevOps hiring, don’t exclude these other paths. Where someone learned isn’t as important as what they learned — or what they can build. So when you’re hiring, or creating a bot that chooses which resumes and applications to move forward with, make sure you’re not [getting rid of qualified candidates](https://www.forbes.com/sites/jackkelly/2021/09/07/harvard-business-school-study-says-software-overlooks-millions-of-qualified-job-candidates-heres-how-to-fight-back-against-the-bots/?sh=5a0f1ff813d3). \n\n## Why non-traditional?\n\nWorkers with non-tech backgrounds bring a lot to the table that should be considered along with their skill sets.\n\n### Diversify the knowledge\n\nPeople who choose non-traditional routes to tech are often people who have already been working at a professional level in another career. 
When you hire someone who took a different path to tech than college, you can benefit from their expertise as well as their experience from their former career. For instance, a former teacher brings the ability to present information and explain new concepts – both skills are essential in DevOps. Similarly, an office manager would know how to manage several different projects at once and understand how to work well within teams. \n\nIf diversity is important to your company, as it should be to every company, then looking beyond hiring only candidates with computer science degrees will help you to find that talent. Statistically speaking, [women and people of color are currently heavily under-represented in computer science programs across the U.S.](https://www.dukechronicle.com/article/2020/06/major-madness-racial-and-gender-equity-in-computer-science), so they are likely to have other experiences such as bootcamps or be self-taught that would still qualify them for some of your DevOps hiring needs.   \n\n### Soft skills \n\nJust as important as technical skills are interpersonal skills, a.k.a. soft skills. [Teamwork, collaboration, and communication](/blog/soft-skills-are-the-key-to-your-devops-career-advancement/) are sometimes dismissed as secondary or unimportant compared to the candidate’s ability to write code. However, these skills are essential when working within a team and across an organization. Because [team-based work is central](/blog/future-proof-your-developer-career/#embrace-the-soft-skills) to implementing DevOps strategies, you should be hiring people with great soft skills to reduce friction. It is beneficial to the company to have a professional who can communicate with the lines of business and the C-suite about projects and goals. 
A college grad may have had internships and work experience, but a marketing designer or restaurant manager who has been in their respective businesses for 10 years already knows how to navigate a deadline-oriented workplace and communicate requirements to other stakeholders. \n\nAs you assemble your DevOps hiring strategy, don’t close yourself off to the idea of a non-traditional candidate. You might be surprised at how much they will enhance the dynamics of your DevOps team.\n",[9,813,267],{"slug":4549,"featured":6,"template":686},"hiring-in-the-deep-end-of-the-talent-pool","content:en-us:blog:hiring-in-the-deep-end-of-the-talent-pool.yml","Hiring In The Deep End Of The Talent Pool","en-us/blog/hiring-in-the-deep-end-of-the-talent-pool.yml","en-us/blog/hiring-in-the-deep-end-of-the-talent-pool",{"_path":4555,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4556,"content":4562,"config":4566,"_id":4568,"_type":14,"title":4569,"_source":16,"_file":4570,"_stem":4571,"_extension":19},"/en-us/blog/how-a-devops-platform-can-help-solve-5-key-smb-frustrations",{"title":4557,"description":4558,"ogTitle":4557,"ogDescription":4558,"noIndex":6,"ogImage":4559,"ogUrl":4560,"ogSiteName":670,"ogType":671,"canonicalUrls":4560,"schema":4561},"How a DevOps platform can help solve 5 key SMB frustrations","SMBs already wear all of the hats. 
Here are 5 ways a DevOps platform can ease the burden.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668242/Blog/Hero%20Images/assembly-3830652.jpg","https://about.gitlab.com/blog/how-a-devops-platform-can-help-solve-5-key-smb-frustrations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How a DevOps platform can help solve 5 key SMB frustrations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-04-25\",\n      }",{"title":4557,"description":4558,"authors":4563,"heroImage":4559,"date":1708,"body":4564,"category":769,"tags":4565},[810],"\n\nStart-ups and small or medium-sized businesses (SMBs) face plenty of challenges, but several of those hurdles can be eased by [adopting a DevOps platform](https://page.gitlab.com/resources-ebook-smb-beginners-guide-devops.html). A DevOps platform can help not only address the issue at hand but the benefits can spread across the company, [helping it grow in a competitive and unpredictable market](/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform/).\n\nThe United States alone is home to 32.5 million small businesses, making up 99.9 percent of all companies in the country, according to a [2021 report from the Small Business Administration’s Office of Advocacy](https://cdn.advocacy.sba.gov/wp-content/uploads/2021/08/30143723/Small-Business-Economic-Profile-US.pdf). And all of these companies have a tough road to travel – so tough that 20 percent of U.S. small businesses fail within the first year, according to the [U.S. Bureau of Labor Statistics](https://www.bls.gov/bdm/entrepreneurship/entrepreneurship.htm). By the end of the fifth year, about 50 percent are shuttered.\n\nStressed with common problems like worker overload, finding time for collaboration, and meeting customer and market needs, smaller businesses are under a lot of pressure. 
With SMBs and small or medium-sized enterprises (SMEs) facing such significant challenges, it only makes sense to streamline software development, [speed up deployments](/blog/pipelines-as-code/), automate repetitive tasks and [foster collaboration](/blog/collaboration-communication-best-practices/). Taking all those steps can greatly improve an SMB’s odds of success. \n\nHere’s how a DevOps platform can help take on some major SMB frustrations:\n\n## Ease worker fatigue and improve work/life balance\n\nSMBs, by definition, have fewer employees than their larger, more-established competitors. That means there are fewer people to take on all the tasks that need to be done. And that’s no different for the software development team, which could very well be a team of one. With everyone in an SMB having to wear so many hats and take on so many different jobs, it can be exhausting. That’s not only hard on productivity, it’s hard on employees’ work/life balance, and therefore not good for the business or the workforce.\n\nA DevOps platform offers an environment that fosters communication, collaboration and automation, which help ease the burdens on the IT staff. This will help [get work done more efficiently and faster](/blog/why-improving-continuously-speeds-up-delivery/), leaving employees with more time for other projects.\n\n## Satisfy customers\n\nHow can you find new customers when you’re not a household name? You do it by keeping the buyers you have and pulling in more by satisfying, and even delighting, your customer base. Satisfied consumers stick around, buy more, and give free word-of-mouth marketing.\n\nA DevOps platform helps SMBs create customer satisfaction by automating the customer feedback process and accelerating [software development and deployment](/blog/how-to-keep-up-with-ci-cd-best-practices/). 
\n\n## Increase communication and collaboration\n\nWorkers in start-ups and small businesses often take on a multitude of projects, and try to chip away at their burgeoning workflows. Meetings – within a department or cross-functional – may be either low priority or tough to arrange. A “heads’ down” attitude is understandable, but means different demographics and perspectives often won’t come together to [better innovate](/blog/pipelines-as-code/) and create more well-rounded products for a wider range of consumers. \n\nA DevOps platform promotes collaboration by eliminating barriers not just between IT workers but within an entire company. And that leads to more innovative features and products, improves productivity, and keeps employees happier and more engaged. Collaborative workers also are continuously learning from each other.\n\n## Adapt to the market with speed and agility\n\nEvery market can be unpredictable. New competitors appear. Customer expectations shift. Supply chain problems affect production. SMBs need to be able to change on a dime, to meet or get ahead of new demands and even new competitors.\n\nA DevOps platform [can keep a business of any size agile](/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform/) by enabling a tech team to scale development and deployment to quickly and efficiently turning ideas into new features or new products.\n\n## Multiply a small business’ tech muscle\n\nSince small businesses, by definition, have fewer people, they obviously have smaller IT departments. They may even have a department of one. That can make it difficult to design, develop and deploy new software, not to mention come up with new and better ways to serve and communicate with customers and the supply chain. When [project planning is a joint, cross-functional effort](/blog/achieve-devsecops-collaboration/) it’s possible to do more with less. 
And having fewer DevOps tools involved - even having everyone use the same tool - can make a big difference.\n\nA DevOps platform, with automated options for everything from testing to monitoring and [doing GitOps with GitLab](/blog/the-ultimate-guide-to-gitops-with-gitlab/), can lessen the hands-on workload, giving IT people more time for other, more creative, projects.\n",[9,793,749],{"slug":4567,"featured":6,"template":686},"how-a-devops-platform-can-help-solve-5-key-smb-frustrations","content:en-us:blog:how-a-devops-platform-can-help-solve-5-key-smb-frustrations.yml","How A Devops Platform Can Help Solve 5 Key Smb Frustrations","en-us/blog/how-a-devops-platform-can-help-solve-5-key-smb-frustrations.yml","en-us/blog/how-a-devops-platform-can-help-solve-5-key-smb-frustrations",{"_path":4573,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4574,"content":4579,"config":4584,"_id":4586,"_type":14,"title":4587,"_source":16,"_file":4588,"_stem":4589,"_extension":19},"/en-us/blog/how-automation-is-making-devops-pros-jobs-easier",{"title":4575,"description":4576,"ogTitle":4575,"ogDescription":4576,"noIndex":6,"ogImage":2547,"ogUrl":4577,"ogSiteName":670,"ogType":671,"canonicalUrls":4577,"schema":4578},"How automation is making DevOps pros’ jobs easier","Six ways automation in a DevSecOps platform aids security, monitoring, compliance, and CI/CD.","https://about.gitlab.com/blog/how-automation-is-making-devops-pros-jobs-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How automation is making DevOps pros’ jobs easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-12-12\",\n      }",{"title":4575,"description":4576,"authors":4580,"heroImage":2547,"date":4581,"body":4582,"category":769,"tags":4583},[810],"2022-12-12","\nAs DevOps professionals look for ways to save time, money, and tech muscle as they work to push 
better and more secure software out the door, they’re increasingly seeing the advantages of automation — and that those advantages seamlessly come with adopting an end-to-end [DevSecOps](/topics/devsecops/) platform. \n\nIn a 2022 GitLab quiz, more than 82% of respondents said automation plays a “vital” role in developing and deploying safer and faster releases. \n\nIt’s clear that DevOps professionals are realizing that automation minimizes the need for a lot of extra hands-on and time-consuming work, like backup, installation, and maintenance. It also can reduce the potential for human error and provide consistency. A DevSecOps platform, unlike a cobbled-together [DIY toolchain](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform.html), offers many advantages, like visibility and collaboration. Another major benefit is that it offers automation for everything from alerts to [testing](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) and monitoring.\n\n## Benefits of DevSecOps automation\n\nHere is how automation throughout the software lifecycle could help DevOps teams cut time and money spent on repetitive tasks, eliminate human errors, and streamline the whole DevOps process:\n\n1. Security – A critical benefit of migrating to a full DevSecOps platform is that software won’t simply get a security test at the end of the pipeline – an inefficient, and often costly, feedback system. When [security is shifted left](/blog/efficient-devsecops-nine-tips-shift-left/), if a vulnerability or compliance issue is introduced into the code, it’s identified almost immediately thanks to automated and consistent testing. Automation built into a DevOps platform leads to better software and reduces the time between designing new, higher-quality features and rolling them out into production. And that maximizes the overall return on software development.\n\n2. 
Compliance – With a single DevSecOps application, [compliance confirmation](/stages-devops-lifecycle/govern/) lives within the platform and is automated. That means professionals can verify the compliance of their code without leaving their workflow, removing the need for compliance managers to require developers to context switch among different point solutions in a DIY toolchain, which can lead to the loss of productivity and efficiency. \n\n3. Configuration – It’s a complicated job to set up, manage, and maintain application environments. [Automated configuration management](/stages-devops-lifecycle/configure/) is designed to handle these complex environments across servers, networks, and storage systems.\n\n4. Continuous integration (CI) – This is the step that enables the DevOps practice of iteration by committing changes to a shared source code repository early and often – often several times a day. [CI](/blog/basics-of-gitlab-ci-updated/) is all about efficiency. By automating manual work and testing code more frequently, teams can iterate faster and deploy new features with fewer bugs more often.\n\n5. Continuous delivery (CD) – This is a software development process that works in conjunction with continuous integration to automate the application release process. When [deployments are handled automatically](/blog/cd-automated-integrated/), software release [processes are low-risk, consistent, and repeatable](/blog/boring-solutions-faster-iteration/). \n\n6. Monitoring – This is a proactive, automated part of the process, focused on tracking software, infrastructure, and networks to trace status and raise alerts to problems. [Monitoring](/stages-devops-lifecycle/monitor/) increases security, reliability, and agility. 
\n\n## Automation by the numbers\n\nIn fact, the [GitLab 2022 Global DevSecOps Survey](https://learn.gitlab.com/dev-survey-22/2022-devsecops-report), which polled more than 5,000 DevSecOps professionals, showed that automation is becoming increasingly critical to all DevOps teams.\n\nThe survey found that 47% of teams report their testing is fully automated today, up from 25% last year. Another 21% plan to roll out test automation at some point in 2022, and 15% hope to do so in the next two or more years. And three-quarters of respondents told us their teams use a DevSecOps platform or plan to use one this year. \n\nWhy are they using a platform? Well, security professionals called out easier automation and more streamlined deployments.\n\n## Fewer repetitive and unnecessary tasks\n\nSo what is all of this automation enabling DevOps professionals to do? They’re able to let go of a lot of work. \n \nAccording to the DevSecOps Survey, respondents said they’ve been able to reduce a lot of repetitive tasks. For instance, they say they no longer have to do as much infrastructure “handholding” — they’re not manually testing their code, writing messy code, and ignoring code quality. \n \nWith automation, each task is performed identically and with consistency, reliability, and accuracy. This promotes speed and increases deliveries, and, ultimately, deployments. While it doesn’t remove humans from the picture, automation minimizes dependency on humans for managing recurring tasks. \n\nAnd with GitLab’s single, end-to-end DevSecOps platform, automation is a system feature and not something that has to be added in. Automation with the GitLab platform is ready to go. 
Check out the [“Ditching DIY DevOps for GitLab’s Single Platform”](https://page.gitlab.com/resources-ebook-trading-diy-devops-for-a-single-platform.html) to learn more ways a platform can help DevOps teams.\n",[9,749,976,977],{"slug":4585,"featured":6,"template":686},"how-automation-is-making-devops-pros-jobs-easier","content:en-us:blog:how-automation-is-making-devops-pros-jobs-easier.yml","How Automation Is Making Devops Pros Jobs Easier","en-us/blog/how-automation-is-making-devops-pros-jobs-easier.yml","en-us/blog/how-automation-is-making-devops-pros-jobs-easier",{"_path":4591,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4592,"content":4598,"config":4604,"_id":4606,"_type":14,"title":4607,"_source":16,"_file":4608,"_stem":4609,"_extension":19},"/en-us/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow",{"title":4593,"description":4594,"ogTitle":4593,"ogDescription":4594,"noIndex":6,"ogImage":4595,"ogUrl":4596,"ogSiteName":670,"ogType":671,"canonicalUrls":4596,"schema":4597},"How DevOps and GitLab CI/CD enhance a frontend workflow","The GitLab frontend team uses DevOps and CI/CD to ensure code consistency, fast delivery, and simple automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679026/Blog/Hero%20Images/frontendworkflow.jpg","https://about.gitlab.com/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How DevOps and GitLab CI/CD enhance a frontend workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"José Iván Vargas\"}],\n        \"datePublished\": \"2018-08-09\",\n      }",{"title":4593,"description":4594,"authors":4599,"heroImage":4595,"date":4601,"body":4602,"category":791,"tags":4603},[4600],"José Iván Vargas","2018-08-09","\nIt might seem like a lot of what we do on frontend is to make our lives easier,\nbut what I’ve learned in the past two years as a GitLab 
team-member and a community contributor\nis that if we make our lives easier, we can make a lot of customers happier, too.\nOver the years, I’ve experienced many changes at GitLab, from a change in processes\nto an increase in team members. From an early stage, the frontend team has been\ncommitted to continuous improvements, but working in a rapidly growing team\nrequired an investment in the way we work.\n\nWhen I joined GitLab we still used some of the default conventions that the [Rails\nframework](/blog/upgrade-to-rails5/) recommended for the frontend, and it helped us for quite a while, but\nthe more code we touched, the more code we needed to test and build for\nperformance, making it more challenging for us to maintain. The frontend team\nrealized that we needed a way to facilitate code consistency, fast delivery, and\nsimple automation, so we decided to incorporate [DevOps](/topics/devops/) and\n[CI/CD](/solutions/continuous-integration/) into our workflow.\n\n## Frontend DevOps and CI/CD workflow\n\nWe used CI in a few scenarios, including using linters to help write a consistent\nstyle of code throughout GitLab, but in the case of our JavaScript code, we\nrealized that building for performance and maintainability was becoming\nincreasingly difficult. So, we moved away from the\n[asset pipeline and utilized webpack](/blog/vue-big-plan/),\nwhich has given us a series of benefits. For example,  when we develop locally,\ndebugging code is now a breeze, and the jobs that are frontend related run on\nproduction-bundled code, ensuring a testing environment that closely resembles\nthat of a user.\n\nAfter CI, we publish code using DevOps by hosting it with\n[GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/)). 
We’ve seen several projects benefit from\nadopting a DevOps model, including\n[GitLab SVG libraries](https://gitlab.com/gitlab-org/gitlab-svgs) and\n[Trello Power-Up](https://docs.gitlab.com/ee/integration/trello_power_up.html).\n\nWhen we created GitLab SVG libraries, we wanted to use them for ourselves and\nmake them available to the general public, so whenever we publish a new version,\nwe use GitLab Pages so that it’s fully automated every time.\n\nWith the Trello Power-Up plugin, we use DevOps to address compatibility\nissues when a new version of Trello is released. GitLab Pages makes it easy to\ndeploy a new version, in a fast and diligent manner, so that it’s accessible in\nthe Trello Marketplace as quickly as possible.\n\n## Frontend DevOps and Data-driven efforts\n\nIncorporating frontend DevOps and CI/CD into the workflow has had a significant\nimpact on efficiency and results. We have greater insight into our operations\nand have metrics to help us detect major areas of improvement. We set up\n[Sitespeed](https://www.sitespeed.io) using Kubernetes to analyze sets of pages\nand provide reports on anything that could hamper our users’ perceived\nperformance, from CSS and JavaScript bundle sizes to accessibility issues and\nthe render time differences between various points in time. The information we gathered using\nSitespeed has helped us improve the merge requests page and identify pages that\nrender slowly. Having more data has changed the way we approach problems at\nGitLab, because we are able to focus our efforts on specific areas.\n\n## The unexpected discovery of problems\n\nOne of the unexpected benefits of our workflow is the discovery of problems that\nwe may not have identified.\n\n### A lack of automation\n\nWe realized, for example, that we lack some automation in our tools. 
For\ninstance, every time we didn’t format code in a specific way, our linter\nnotified us, but analyzing and fixing the code slowed down developer velocity,\nso we decided to add [Prettier](https://prettier.io/) to format our code in our\nmerge requests for us. We also realized that, sometimes, we need a little bit of\nautomation when we publish code. As an all-remote company, many of us work on\npublic WiFi, and we found that unreliable connections could have detrimental\neffects while deploying code. The combination of CI and DevOps made deployments\neasier. If we triggered a pipeline and a coffee shop WiFi goes vamoose, it\ndoesn't matter. We already automated a significant part of our development\nprocess, but we’re always striving for more.\n\n### A lack of speed\n\nIn the case of CI, we noticed that our own tools can be a source of problems. We\nfound that we didn’t make the necessary considerations to keep our test suite fast.\nAs developers, we want to go back to developing as fast as possible. A few of my\nteammates discovered that our test runs were becoming slower and slower with each\nrelease. Even though these are not customer-facing changes, it has made both\nproduct managers and team managers consider investing in those issues, because\nthe easier the development cycle is for the developers involved, the better it\nis for our customers, since we can deliver even more features. Furthermore, we\ncan prevent regressions from happening by having solid foundations, such as\ntesting, code style, and code formatting.\n\nEvery time we discover problems that affect us or our work, we realize that we\ncan also jeopardize the features and experiences we want to deliver to our\ncustomers. 
It has changed the culture inside the team, because we view\nperformance issues as developers rather than as GitLab team-members.\n\n## Advice to frontend teams\n\nUsing DevOps and CI/CD in a frontend workflow is compatible with teams of any\nsize, including small teams that may want to ensure that their code styling is\nthe same.\n\n### Put a linter in place\n\nWith CI, the smallest and perhaps one of the most significant steps is\nto put a linter in place, and if the pipeline doesn't pass, you can’t merge the\ncode. That's such a simple, effective way to improve your code and to keep it\ntidy and clean in the long run. Just setting up some simple steps using CI will\nimprove your team’s code and your developers’ quality of life so that they don't\nhave to worry about combing through past code. Even though small teams might not\nfind the value in the short term, when they scale, they certainly will.\n\n### Create consistent scenarios\n\nThe bigger the project, the more you realize that some of your tooling ends up\nrunning locally, and it's beneficial to run it on CI. If something doesn't work\non a generic type of machine that has enough dependencies installed to run your\nCI setup, that means there’s something wrong and that you should probably fix it\nbefore merging your code. As long as you can create a consistent scenario in which\nyou can do things like testing and linting, you should be in a good position to\ndeliver a great product.\n\n### Select CI-compatible tools\n\nFor teams of all sizes, it’s important that the tools you select as part of your\nworkflow are compatible with CI in some way, so that even if you had a big part\nof your workflow running locally, you can easily move to CI by creating a pipeline\nthat resembles that of your daily workflow. Regardless of the tool that you choose,\ncreating a job for it will return a lot of value in the long run. 
If it makes\nsense, I encourage you to add it, because there’s very little incentive not to.\nCI-compatible tools include tests runners, linters, Prettier, or any custom-made\ntools that help you in some way. One decision you might want to avoid is creating\non servers that live on CI runners. Since they only run for a limited amount of\ntime, these servers will stop existing. You could also add deployments to your\nCI workflow, helping you with DevOps and preventing you from worrying about\ncomplicated local setups for new developers. The possibilities are huge.\n\n### Add performance testing\n\nTo add to the pool of possibilities, why not add performance testing to your\nmerge requests with a tool such as\n[Lighthouse](https://developers.google.com/web/tools/lighthouse/), which can\nhelp you understand potential performance bottlenecks in your website. Or, maybe\nyour team can add the ability to generate code documentation and publish it via\nGitLab Pages. CI/CD can be a really good tool, because it will return something\nimmediately. It's just a matter of how you want to use it, depending on your needs.\n\nThe more the frontend team uses CI and DevOps, the more we discover ways to use\nit, so it’s worth it to us to invest in this tool.\n\nSometimes, we just want to\nget stuff out there without too much consideration for tooling and CI and CD,\nbut because of the benefits we’ve experienced, we now include CI/CD in all of\nour projects. With GitLab, everything is integrated, so why skip it? 
Instead of\nfighting against automation, I encourage teams to embrace the idea that CI is\nthere to help you.\n\n[Cover image](https://unsplash.com/photos/UbGqwmzQqZM) by\n[Zhipeng Ya](https://unsplash.com/photos/UbGqwmzQqZM?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText), licensed\nunder [CC X](https://unsplash.com/license).\n{: .note}\n",[3173,683,976,9],{"slug":4605,"featured":6,"template":686},"how-devops-and-gitlab-cicd-enhance-a-frontend-workflow","content:en-us:blog:how-devops-and-gitlab-cicd-enhance-a-frontend-workflow.yml","How Devops And Gitlab Cicd Enhance A Frontend Workflow","en-us/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow.yml","en-us/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow",{"_path":4611,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4612,"content":4618,"config":4624,"_id":4626,"_type":14,"title":4627,"_source":16,"_file":4628,"_stem":4629,"_extension":19},"/en-us/blog/how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team",{"title":4613,"description":4614,"ogTitle":4613,"ogDescription":4614,"noIndex":6,"ogImage":4615,"ogUrl":4616,"ogSiteName":670,"ogType":671,"canonicalUrls":4616,"schema":4617},"How do we handle engineering-led issues that don't belong to one team?","A recent issue sparked a lively discussion between engineering and product leadership about how 'cross-vertical' issues should be prioritized to avoid the bystander effect.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678916/Blog/Hero%20Images/how-do-we-handle-engineering-led-initiatives.jpg","https://about.gitlab.com/blog/how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How do we handle engineering-led issues that don't belong to one team?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        
\"datePublished\": \"2018-10-30\",\n      }",{"title":4613,"description":4614,"authors":4619,"heroImage":4615,"date":4621,"body":4622,"category":299,"tags":4623},[4620],"Emily von Hoffmann","2018-10-30","\nThe GitLab engineering team is split according to [product category](/handbook/product/categories/), so that team members in each category can [focus, specialize, and collaborate](/blog/configure-post/) on the same issues at the same time. They are semi-siloed by design, so what happens to issues, like tech debt, that are everyone and no one’s responsibility?\n\nThe short answer is, teams are still figuring it out. A recent [issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/52150) sparked a lively discussion and video call, which you can watch below. Listen in below on the discussion between engineering and product leadership about how technical debt or other engineering initiatives that are \"cross-vertical\" (that is, touch on many different product areas) should be prioritized given that there isn't one clear point of contact or responsibility for those issues.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/3ZEI4W_Cb2g\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### The gist\n\nThe issue that started it all had to do with a task that would have been assigned to the former Platform team, which used to be a catch-all that has since been split up into Create and Manage. Engineering Manager, Create [Douwe Maan](/company/team/#DouweM) explains, “With all backend teams now focused on specific product areas... there is no team to take on these kinds of backend-wide, non-product-area specific issues anymore.”\n\nHe continues, “Issues like this affect all backend teams equally, so we fall prey to the bystander effect. 
When an engineering manager gets to make room in a given release for an engineering-led initiative, they have the choice between issues like this, that any team could pick up, and product area-specific issues, that aren't going to get done unless their team does it, so the latter will have a far higher chance of being picked. Everyone cares about these kinds of issues, which means no one cares... there are many issues (technical debt and otherwise) that aren't currently anyone's responsibility, so they won't get done.”\n\nThis felt like a recurring problem due to other recent examples of cross-vertical initiatives stalling, like this issue to [switch to Rails 5 in production](https://gitlab.com/gitlab-org/gitlab-ce/issues/48991), and this issue to [update GitLab's referrer policy](https://gitlab.com/gitlab-org/gitlab-ce/issues/39147).\n\n### The research\n\nWe've heard from our community that this is a common problem, especially when working with others in different functions. In [recent interviews](https://drive.google.com/file/d/1A5mSNoPJydjcWKE4rdO2287sjnABxGDA/view) with 15 DevOps engineers, many expressed their frustration at the amount of reactive work and rework that they face, and identified a lack of successful coordination and empathy between different teams as the culprit. One interviewee said he thought this is inherent to working with some functions. 
Because of how release schedules work for developers and security engineers, he thinks these groups are the least likely to feel they are able to assign cycles to some proactive tasks, like fixing technical debt before it's critical.\n\nThe nearly 20 [software engineers](https://drive.google.com/file/d/1EVrjVcgIBbuNf4Gwenajsiy6Wv9HsTJw/view) we [interviewed](https://drive.google.com/file/d/15GksPiH0xmy4nRhylhMDIWmuvdHMWof4/view) also brought up their frustration at the way that technical debt can transform a seemingly simple task into a massive effort requiring them to rewrite or refactor a large chunk of code. More than the time spent on these tasks, several developers mentioned their concern that others might see them as dragging their feet and becoming a blocker when they take the time to resolve the technical debt. After all, it was just \"a simple task.\"\n\nThe responsibility to fix these issues becomes even more muddied when no particular team owns them. One [study of 95 teams in 25 leading corporations found that the majority of cross-functional teams are dysfunctional](https://hbr.org/2015/06/75-of-cross-functional-teams-are-dysfunctional), in large part because siloes self-perpetuate. The authors argue the solution is to create a “Portfolio Governance Team (PGT), where high-level leaders make complex decisions on the various projects in their portfolio together.\" The number one rule for making a PGT successful? \"Every project should have an end-to-end accountable leader.\"\n\n### The fix\n\nAlong these lines, one long-term solution being discussed at GitLab is establishing a dedicated team that will transcend the product areas and be responsible for these murky in-between issues. But Director of Engineering, Dev Backend [Tommy Morgan](/company/team/#itstommymorgan) adds, “Even if we had a team that was in place to handle issues like this one, there will always be boundary conditions. 
As Product is responsible for prioritizing work, if we need to do any horse-trading or other determination to figure out where the work should land, I think that's something that Product should work out.”\n\nShort of creating a new team, Product Managers and Engineering Managers will need to frankly discuss their own priorities and incentives in order to get these tasks scheduled.\n\nWhat has your org tried? Is it working? Tweet us [@gitlab](https://twitter.com/gitlab).\n\n[Photo](https://unsplash.com/photos/fIq0tET6llw) by [Diego PH](https://unsplash.com/@jdiegoph) on Unsplash.\n{: .note}\n",[749,9,728,683],{"slug":4625,"featured":6,"template":686},"how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team","content:en-us:blog:how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team.yml","How Do We Handle Engineering Led Initiatives That Dont Belong To One Team","en-us/blog/how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team.yml","en-us/blog/how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team",{"_path":4631,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4632,"content":4638,"config":4644,"_id":4646,"_type":14,"title":4647,"_source":16,"_file":4648,"_stem":4649,"_extension":19},"/en-us/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications",{"title":4633,"description":4634,"ogTitle":4633,"ogDescription":4634,"noIndex":6,"ogImage":4635,"ogUrl":4636,"ogSiteName":670,"ogType":671,"canonicalUrls":4636,"schema":4637},"How GitLab improves cloud native application security and protection","In this article, we will show you how GitLab can help you streamline your cloud native application security from a code and operations point of view by providing you with real-world 
examples.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664102/Blog/Hero%20Images/gitlab-values-cover.png","https://about.gitlab.com/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab improves cloud native application security and protection\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nico Meisenzahl\"}],\n        \"datePublished\": \"2020-08-18\",\n      }",{"title":4633,"description":4634,"authors":4639,"heroImage":4635,"date":4641,"body":4642,"category":769,"tags":4643},[4640],"Nico Meisenzahl","2020-08-18","\n{::options parse_block_html=\"true\" /}\n\nIn the [cloud-native](/topics/cloud-native/) ecosystem, decisions and changes are made on a rapid basis. Applications get adapted and deployed multiple times a week or even day. Microservices get developed decentralized with different peoples and teams involved. In such an environment, it is crucial to ensure that applications are developed and operated safely. This can be done by shifting security left into the developer lifecycle but also by using DevSecOps to empower operations with enhanced monitoring and protection for the application runtime.\n\nIn this article, I would like to show you how GitLab can help you streamline your application security from a code and operations point of view by providing you with real-world examples. Before we deep dive into the example, let me first introduce you to the [GitLab Secure](https://about.gitlab.com/stages-devops-lifecycle/secure/) and [GitLab Protect](https://about.gitlab.com/stages-devops-lifecycle/govern/) product portfolio which are the foundation for this. GitLab Secure helps developers to enable accurate, automated, and continuous assessment of their applications by proactively identifying vulnerabilities and weaknesses and therefore minimizing security risk. 
GitLab Protect, on the other hand, supports operations by proactively protecting environments and cloud-native applications by providing context-aware technologies to reduce overall security risk. Both are backed by leading open-source projects that have been fully integrated into developer and operation processes and the GitLab user interface (UI).\n\n## Cloud Native Application Security: The attack\n\nLet’s assume we have an application hosting a web interface that allows a user to provide some input. The application is written in [Golang](https://golang.org/) and executes the input as part of an external operating system command ([os/exec](https://golang.org/pkg/os/exec/)). The application does not contain any validation or security features to validate the input, which allows us to inject additional commands that are also executed in the application environment.\n\nThe application is running as containerized microservices in a Kubernetes cluster. The Kubernetes Cluster is shared across multiple teams and projects, allowing us to inject and read data in another application running next to ours. In our example, we will connect an unsecured Redis instance in a different Namespace and read/write data.\n\nNow let us take a closer look at how GitLab can help us detect the attack, permit its execution, and finally help us find and fix the root cause in our code.\n\n## Container Host Security\n\n[Container Host Security](/stages-devops-lifecycle/govern/) helps us to detect an attack in real-time by monitoring the pod for any unusual activity. It can then alert operations with detailed information on the attack itself.\n\nContainer Host Security is powered by [Falco](https://falco.org/), an open-source runtime security tool that listens to the Linux kernel using eBPF. Falco parses system calls and asserts the stream against a configurable rules engine in real-time. 
The Falco deployment used by Container Host Security can be deployed and fully managed using [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html).\n\nIn our example, Falco detects the injected redis-cli command, which is used to read/write data into the unsecured Redis instance. \n\n![Container Host Security](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/falco.png)\n\nFalco can now alert operations who can use those valuable insights to define and execute further steps. \n\n## Container Network Security\n\nA first step to permit access to the unsecured Redis instance would be to permit traffic between the application in our Kubernetes cluster. This can be done by using [Container Network Security](/stages-devops-lifecycle/govern/). Container Network Security is again fully managed by [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html) and can also be configured within the GitLab project user interface.\n\nContainer Network Security is powered by [Cilium](https://cilium.io/), an open-source networking plugin for Kubernetes that can be used to implement support for NetworkPolicy resources. [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) can be used to detect and block unauthorized network traffic between pods and to/from the Internet.\n\nImplementing Network Policies for our application will block the underlying network traffic generated by the attack. The policies can be enabled within the GitLab project UI:\n\n![Network Policies](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/network-polices.png)\n\n## Web Application Firewall\n\nWith Container Network Security in place, our attack isn’t able to talk to the Redis instance anymore, but it is still possible to execute other network unrelated attacks using the command injection. 
[Web Application Firewall (WAF)](/stages-devops-lifecycle/govern/) can now help us to increase the security and detect and block the attack at the [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) level. \n\nThe Web Application firewall is also powered by open-source. It is based on the [ModSecurity](https://kubernetes.github.io/ingress-nginx/user-guide/third-party-addons/modsecurity/) module, a toolkit for real-time web application monitoring, logging, and access control. It is preconfigured to use the [OWASP’s Core Rule Set](https://www.modsecurity.org/CRS/Documentation/), which provides generic attack detection capabilities. Like the other integrations, Web Application Firewall is also fully managed by GitLab using [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html).\n\nIn our example, the Web Application Firewall detects the attack and is also able to block it:\n\n![Web Application Firewall logs](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/waf-log.png)\n\nBlocking the attack at the Ingress level will help us to deny the traffic before it hits our application. 
To do so, we can enable the Web Application Firewall blocking mode directly from the GitLab UI:\n\n![WAF settings](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/waf-settings.png)\n\nIn addition to Container Host Security, we could have used the Web Application Firewall to detect the attack using the Thread Monitoring dashboard within our GitLab project:\n\n![Thread Monitoring](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/thread-monitoring.png)\n\nThe Thread Monitoring dashboard also provides us with useful insights and metrics of our enforced Container Network Policy.\n\n## Static Application Security Testing\n\nWe have now successfully protected our application runtime and ensured that no additional attacks can be executed. But we should also find and fix the root cause to ensure that such incidents are not recurring in the future. This is where [Static Application Security Testing (SAST)](/stages-devops-lifecycle/secure/) can help us. Static Application Security Testing can be easily integrated into our project using [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) and then allows us to analyze our [source code](/solutions/source-code-management/) for known vulnerabilities.\n\nIn our case (a Golang application) the code scanning is executed using the open-source project [Golang Security Checker](https://github.com/securego/gosec). 
The results are displayed in the Security dashboard of our GitLab project for easy access:\n\n![Security Dashboard](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/sec-dashboard.png)\n\nIn our example, the code scan has identified the root cause and provides us with detailed information about the vulnerability, the line of code that needs to be fixed, and the ability to easily create an issue to fix it.\n\n![SAST](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/sast.png)\n\nFinally, of course, we should also talk to the team running the other application to make sure that their Redis instance gets secured too. We should also verify how the other [GitLab Secure](https://about.gitlab.com/stages-devops-lifecycle/secure/) features can help to further improve the overall security of the application.\n\n## GitLab Protect and Secure in action\n\nIf you like to get more insights on GitLab Secure and Protect and want to see it in action, you are welcome to join [Wayne](https://gitlab.com/whaber), [Philippe](https://gitlab.com/plafoucriere) and myself in our session [“Your Attackers Won't Be Happy! How GitLab Can Help You Secure Your Cloud-Native Applications!”](https://gitlabcommitvirtual2020.sched.com/event/dUWw/your-attackers-wont-be-happy-how-gitlab-can-help-you-secure-your-cloud-native-applications) at GitLab Commit where you can gain further insights on Container Host Security, Container Network Security, Web Application Firewall (WAF), and Status Application Security Testing (SAST).\n\nRegister today and join me and others at [GitLab Commit](https://about.gitlab.com/events/commit/) on August 26. 
GitLab Commit 2020 is a free 24-hour virtual experience filled with practical DevOps strategies shared by leaders in development, operations, and security.\n",[1041,9,1477,682,875],{"slug":4645,"featured":6,"template":686},"how-gitlab-can-help-you-secure-your-cloud-native-applications","content:en-us:blog:how-gitlab-can-help-you-secure-your-cloud-native-applications.yml","How Gitlab Can Help You Secure Your Cloud Native Applications","en-us/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications.yml","en-us/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications",{"_path":4651,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4652,"content":4657,"config":4662,"_id":4664,"_type":14,"title":4665,"_source":16,"_file":4666,"_stem":4667,"_extension":19},"/en-us/blog/how-gitlab-iteration-value-drives-innovation-through-the-engineering-organization",{"title":4653,"description":4654,"ogTitle":4653,"ogDescription":4654,"noIndex":6,"ogImage":867,"ogUrl":4655,"ogSiteName":670,"ogType":671,"canonicalUrls":4655,"schema":4656},"How the GitLab iteration value drives innovation through the engineering","GitLab is a unique place to be a developer. 
Here's why.","https://about.gitlab.com/blog/how-gitlab-iteration-value-drives-innovation-through-the-engineering-organization","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How the GitLab iteration value drives innovation through the engineering\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-06-10\",\n      }",{"title":4653,"description":4654,"authors":4658,"heroImage":867,"date":4659,"body":4660,"category":2241,"tags":4661},[745],"2022-06-10","GitLab is focused on helping developers iterate faster and innovate more collaboratively – and that focus on enabling iteration extends to our own developer culture.\n\nAs an organization, our [CREDIT values](https://handbook.gitlab.com/handbook/values/) are hardwired into our operations and culture. This empowers our development teams to work together – using our own product – to offer QA, feedback, and strategies that make everyone’s work stronger and help our organization iterate faster. \n\nWe asked several engineers and engineering leaders at GitLab to tell us, in their own words, how our values come to life in our engineering organization and how that makes GitLab a unique place to be a developer.\n\n## What attracts engineers to GitLab\n\nTo start, we wanted to understand what attracted some of our current engineers and engineering leaders to join GitLab.\n\n**You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.**\n\n“I was attracted to GitLab because I knew that I had the ability to make an impact. Being remote has shattered the walls between people and teams, so anybody can approach anybody. If something means something to you, you can really work on it. 
This culture of transparency and collaboration is really important to me.” - [Sri Rangan](/company/team/#sri19), Fullstack Engineer, Incubation Engineering Team\n\n“People are attracted to the global diversity of the team and working asynchronously. I think we have a special working culture at GitLab. When you join, whether you're the manager of multiple people or a manager of yourself, you work asynchronously regardless of where your teams are.” - [Mek Stritti](/company/team/#meks), VP, Quality\n\n“Before coming to GitLab, I was a frontend, backend, Android developer, data scientist, and machine learning engineer, among other things. But the thing about how I work is that I like to switch between those roles. And normally in companies, you can't grow across all those roles. You need to grow as a specialist, not a generalist. But within the Incubation Engineering team, I get to do that.” - [Eduardo Bonet](/company/team/#eduardobonet), Fullstack Engineer, Incubation Engineering Team\n\n“The feedback that I quite often hear from engineers is just how strong the team is around them, and how collaborative the rest of the organization is. For my team in particular, a big part of their success is to be able to collaborate effectively with both the people that they work with and other teams. A lot of candidates are attracted to GitLab by the transparency value. Transparency is something that we really try to encourage, and it becomes a big mindset.” - [Bartek Marnane](/company/team/#bmarnane), VP, Incubation Engineering\n\n## How we ensure collaboration across the organization \n\nBeyond the aspects of GitLab that attracted many of our current engineers, it was clear that the culture they experienced during their time here ensured there was collaboration across various teams within our engineering organization. \n\n\"We have an organization that supports each other. 
You propose a feature, you're building something, and you can collaborate very easily across the globe, across departments with people in infrastructure and security. So when you're building something it's not all on you to ensure its stability and reliability and safety – the entire organization takes ownership of that.” - [Darby Frey](/company/team/#darbyfrey), Fullstack Engineer, Incubation Engineering\n\n“We have a strong culture of collaboration, people reach out and say, “Hey, I'm looking for someone to dogfood this,” and we're always willing to pick those up. Our team has a goal to dogfood a new feature every milestone.” - [Kyle Wiebers](/company/team/#kwiebers), Manager, Engineering, Quality Team\n\n## Why we believe in iteration (and building boring solutions when they work)\n\nOur engineering teams are always thinking about how to best deliver value and receive feedback along the way. It turns out that iteration and building boring solutions that can be delivered quickly is a great way to deliver value and receive feedback. For example, our [Incubation team](/handbook/engineering/development/incubation/) is working to move away from the natural instinct to develop a prototype, get it working, then putting it into the product.\n\n“We’re asking,'how can we look at what you are planning on doing, and then divide that into milestones where every one of those milestones can be integrated into the product?' So we get value out of it and get feedback out of it as well.” - Bartek \n\nAcross other parts of GitLab’s engineering organization, the same type of approach is being embraced.\n\n“For my team, what we try to do is identify a big problem, and then identify lots of small solutions towards that problem. The embrace of efficiency and iteration really aligns with the team that I'm on.” - Kyle\n\n“We want to ship new features quickly so we can get feedback. That first version isn’t going to be perfect, but we're okay with that. 
We all agree that it's better to get feedback than to spend six months polishing every pixel on a feature that maybe no one wants, and then having to throw it out.” - Darby\n\nWhether it’s our Incubation Engineering team or Quality in Engineering team, embracing iteration and collaboration as a way to achieve results has become the standard approach. \n\nLearn more about how you can contribute to a culture of empathy and productivity by launching or progressing your career at GitLab by checking out our [careers page](/jobs/).\n",[9,9,728],{"slug":4663,"featured":6,"template":686},"how-gitlab-iteration-value-drives-innovation-through-the-engineering-organization","content:en-us:blog:how-gitlab-iteration-value-drives-innovation-through-the-engineering-organization.yml","How Gitlab Iteration Value Drives Innovation Through The Engineering Organization","en-us/blog/how-gitlab-iteration-value-drives-innovation-through-the-engineering-organization.yml","en-us/blog/how-gitlab-iteration-value-drives-innovation-through-the-engineering-organization",{"_path":4669,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4670,"content":4676,"config":4682,"_id":4684,"_type":14,"title":4685,"_source":16,"_file":4686,"_stem":4687,"_extension":19},"/en-us/blog/how-gitlabs-customer-and-partner-focus-fuels-our-culture",{"title":4671,"description":4672,"ogTitle":4671,"ogDescription":4672,"noIndex":6,"ogImage":4673,"ogUrl":4674,"ogSiteName":670,"ogType":671,"canonicalUrls":4674,"schema":4675},"How GitLab's customer and partner focus fuels our culture","It’s an exciting time to be working in a customer- or partner-facing role at GitLab. 
Our sales team members explain why.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679412/Blog/Hero%20Images/sales_blog_image_tiny.jpg","https://about.gitlab.com/blog/how-gitlabs-customer-and-partner-focus-fuels-our-culture","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab's customer and partner focus fuels our culture\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jake Foster\"}],\n        \"datePublished\": \"2022-05-03\",\n      }",{"title":4671,"description":4672,"authors":4677,"heroImage":4673,"date":4679,"body":4680,"category":2241,"tags":4681},[4678],"Jake Foster","2022-05-03","\n\nIt’s an exciting time to be working in a customer- or partner-facing role at GitLab. Our role with customers is to build personalized relationships and demonstrate how we can help them solve problems with a best-in-class DevOps platform. \n\nAs we grow, our customer and partner focus plays a key role in building a healthy, connected workplace culture at GitLab. So we asked some of our leaders and team members from across the Sales, Channel Partner, and Account Management teams to share their insights. Here’s what we learned.\n\n## The opportunity we have to become the leader in DevOps means hiring more top-tier talent \n\n\"We are on a journey as a company where we believe we have got this exciting market opportunity. We've got a great product that fits the market really well, and that product is an industry leader.\n\n\"We believe a lot of companies are going to buy DevOps. We need to make sure that they buy that from us and that's a hard thing. That execution requires lots of top talent. We want to keep growing, as a team and individually, to capture more market share. 
That's going to take a lot of people who are great at what they do.\"\n\n- Michael McBride (a.k.a \"McB\"), Chief Revenue Officer\n\n## Why GitLab is an ideal place to grow in a sales or channel partner role \n\n\"We have an integrated GTM with our field sales teams and channels and alliances partners. I look after both the sales organization that manages those partners and supports them and their engagement with our direct selling force, as well as the programs and enablement and functions that it takes to integrate those partners into our go-to-market. \n\n\"I believe we've got great technology, great market timing, high customer need, lots of customer value, and a great product. That makes for a pretty awesome mix from a partnering perspective. It’s lots of fun to manage partners who are aiming to grow their businesses at the same time. It’s going to make the partners very happy.\" \n\n- Michelle Hodges, VP, WW Channels \n\n\"At my previous company, we were an unknown entity and you had to really pull out all the stops to get people just to take a call with you or to test the product or buy the product. Whereas, with GitLab, I would get on calls and customers are super excited to meet people from GitLab. There were quite a few cases where people were already going to buy GitLab, but they just needed someone to help them understand what they wanted to buy. It was a salesperson's dream because you are working with people who not just love the product, but love what the company stands for. \n\n\"I remember one time I was in a coffee shop, and I had a GitLab sticker on my laptop. Someone saw that – he was a developer, he came up to me and said, 'Wow, you work at GitLab. I love that company and we use it in our team.' 
I felt a bit like a celebrity getting spotted on the streets.\" \n\n- Anthony Ogunbowale - Thomas, Named Account Executive, EMEA \n\n## What makes our culture unique \n\n\"The things in the [company handbook](/handbook/) can be kind of unbelievable to folks from the outside, when they say there's unlimited vacation time or they value results, not hours. But after being here for three years, it's true – there’s a real emphasis on valuing productivity and results. And, when people produce results, they’re rewarded.\" \n\n- Kevin Vogt, Federal Technical Account Manager\n\n\"I am not joking when I say this: This is the most successful I've ever felt in my career. And a lot of it is down to our values. \n\n\"We have a value system that's called [CREDIT](https://handbook.gitlab.com/handbook/values/): It's collaboration; results; efficiency; diversity, inclusion, and belonging; iteration; and transparency. You will find in every engagement with a GitLab team member that they work towards exhibiting those things in a really authentic, intentional manner. It makes it a great place to build relationships, but also to get your job done. It creates innovation, speed, and teamwork in a way that I haven't found before.\" \n\n- Michelle Hodges\n\n## How GitLab sets its team members up for success \n\n\"We 'dogfood' our tools. We use GitLab for everything from HR to legal – the entire company uses GitLab as a platform. \n\n\"The company is also great with training. Any time that I've ever wanted training for any kind of need in my business role, they've always provided it and reimbursed it. I just finished a month's worth of training classes on how to be a successful manager. That's my first month going into that role, trying to make sure that I can be set up for success in it.\" \n\n- Kevin Vogt\n\n\"Every conversation with the customer is a collaboration. 
In pre-sales, we have a solutions architect, who's more of a technical person, and they can help lead on answering technical questions or do demos and proof of concepts. And then, depending on how the conversation is going, we might bring on someone from Product, in relation to what the customer's looking at. Everyone in the organization works together to help the customer understand and feel comfortable with the solution.\" \n\n- Anthony Ogunbowale - Thomas\n\n\"McB, our CRO, does his own Reverse Ask Me Anything session for team members that are underrepresented in tech to understand what the experience is on the GitLab Sales team. And also what upward mobility and trajectory could look like in the company. \n\n\"I feel very supported here. I feel empowered. It's one of the first jobs I've felt where they just trust me. They tell me to take things and run with it.\" \n\n- Marcus Carter, Senior Sales Recruiter\n\n## What we’re looking for as we grow our team \n\n\"I would say, curiosity is huge. Somebody who's curious and doesn't mind asking questions. I'd say somebody who is customer-focused, somebody who's excited about our customers, and somebody who's excited about technology as a whole, and in how technology is set to advance us. It's someone who is tenacious, somebody who is unrelenting and trying to offer solutions.\" \n\n- Marcus Carter\n\n\"This is a place where we believe we have a large market in every single one of our territories. There are customers that need the right DevOps solution and our product fits with those customers really well. So that leaves one last thing, sales skill. \n\n\"That’s great for a sales rep. 
If I've got the right product and a solid market, I'm excited, because I know I can deliver the sales skill, especially if I've got the marketing support and all the other things that GitLab has.\" \n\n- Michael McBride\n\n\nIf GitLab sounds like the place for you, there’s plenty more to learn about what it’s like to be a part of our team on our [careers site](/jobs/). You can also [learn more about open roles on our team](https://boards.greenhouse.io/gitlab).\n",[728,813,9],{"slug":4683,"featured":6,"template":686},"how-gitlabs-customer-and-partner-focus-fuels-our-culture","content:en-us:blog:how-gitlabs-customer-and-partner-focus-fuels-our-culture.yml","How Gitlabs Customer And Partner Focus Fuels Our Culture","en-us/blog/how-gitlabs-customer-and-partner-focus-fuels-our-culture.yml","en-us/blog/how-gitlabs-customer-and-partner-focus-fuels-our-culture",{"_path":4689,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4690,"content":4695,"config":4700,"_id":4702,"_type":14,"title":4703,"_source":16,"_file":4704,"_stem":4705,"_extension":19},"/en-us/blog/how-is-ai-ml-changing-devops",{"title":4691,"description":4692,"ogTitle":4691,"ogDescription":4692,"noIndex":6,"ogImage":2274,"ogUrl":4693,"ogSiteName":670,"ogType":671,"canonicalUrls":4693,"schema":4694},"How is AI/ML changing DevOps?","Can DevOps help AI/ML find maturity? 
Here are questions to consider.","https://about.gitlab.com/blog/how-is-ai-ml-changing-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How is AI/ML changing DevOps?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-11-16\",\n      }",{"title":4691,"description":4692,"authors":4696,"heroImage":2274,"date":4697,"body":4698,"category":679,"tags":4699},[766],"2022-11-16","\n\nThe last few years have seen an explosion in artificial intelligence, [machine learning](/blog/top-10-ways-machine-learning-may-help-devops/), and other types of projects. Companies like Hugging Face and applications like [DALL-E 2](https://openai.com/dall-e-2/) have brought to the mainstream what the power of AI/ML can bring to the next generation of computing and software. As every company has become a software company over the last few decades, the ability to innovate and leverage the ever-growing amount of data that organizations have access to have become where enterprises turn to compete.\n\nHowever, a lot of AI/ML projects get stalled from several challenges that may seem familiar to software professionals who have been around since [the early days of DevOps](/blog/the-journey-to-a-devops-platform/).  Adoption and optimization of artificial intelligence and machine learning have been hampered by a lack of repeatability for experiments, a disparity of tools and information silos, and a lack of team collaboration.\n\n## A new model for data modeling\n\nOne of the first ways to look at this problem is to make sure that the mental model is in place to allow the team to reason about both the strategic vision for AI/ML at your organization. 
And once that has been established, also think about the tactical “jobs to be done” to lay the foundation for that work.\n\nStrategically, there are many teams that have to come together for a successful AI/ML program. First, the data has to both be acquired and transformed into a usable set of clean data. Often referred to as [“DataOps”](/blog/introducing-modelops-to-solve-data-science-challenges/) this involves the typical “ETL” or extract, load, transform processes data has to go through to be useful for teams. From there, you have to productionize the data workloads through MLOps - the experimentation, training, testing, and deployment of meaningful models based on the extracted and transformed data.\n\nAnd once those two steps are complete, you can finally understand how to make production use cases for your data. You can use AI Assisted features to focus on improving user experiences, for financial forecasting, or for general trends and analysis of various parts of your business. Given the complexity of this value chain, the various teams and skills involved, and the current mishmash of tooling, there is a lot that teams can learn from the history of DevOps as they tackle these problems.\n\n## DevOps and AI/ML\n\nMuch like the various stages of obtaining and applying AI/ML for business uses, software development consists of many varied steps with different teams and skills sets to achieve the business goals outlined. That is why years ago, folks came up with this [concept of “DevOps”](/topics/devops/)– combining teams and having them work together in a cycle of continuous improvement towards the same goals – to combat silos and inefficiencies. \n\nData science teams are using specialized tools that don't integrate with the existing software development lifecycle tools they already use. This causes teams to work in silos, creating handoff friction and resulting in finger-pointing and lack of predictability. 
Businesses and software teams often fail to take advantage of data, and it takes months for models to get into production by which time they may be out of date or behind competitors.  Security and data ethics are frequently treated as an afterthought. This creates risk for organizations and slows innovation. \n\n## Learning from the past\n\nIf the past decades of “DevOps” evolution have taught us anything, it's that breaking down the silos between teams through the tools and processes they are using pays off dividends for business. As your team begins their [AI/ML journey](/blog/why-ai-in-devops-is-here-to-stay/) — or if you've found yourself stalling in AI/ML initiatives already — you should consider how you can consolidate teams together, ensure they are working efficiently together, and able to collaborate without boundaries.\n\nAn explosion of tools in the space is tantalizing with the promise of “getting started” quickly. But it may not set your organization up for long-term success in these areas if those tools have the effect of separating parts of your organization from one another. Creating and sustaining an AI/ML program will require intentionality behind both the processes and tools your team is using. 
That allows your teams to extract, transform and load data efficiently, tune, test and deploy models effectively, and leverage AI/ML to drive value for your stakeholders for the long haul.\n",[9,231,1040,1181],{"slug":4701,"featured":6,"template":686},"how-is-ai-ml-changing-devops","content:en-us:blog:how-is-ai-ml-changing-devops.yml","How Is Ai Ml Changing Devops","en-us/blog/how-is-ai-ml-changing-devops.yml","en-us/blog/how-is-ai-ml-changing-devops",{"_path":4707,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4708,"content":4713,"config":4718,"_id":4720,"_type":14,"title":4721,"_source":16,"_file":4722,"_stem":4723,"_extension":19},"/en-us/blog/how-modern-devops-practices-are-changing-the-operations-role",{"title":4709,"description":4710,"ogTitle":4709,"ogDescription":4710,"noIndex":6,"ogImage":4180,"ogUrl":4711,"ogSiteName":670,"ogType":671,"canonicalUrls":4711,"schema":4712},"How modern DevOps practices are changing the operations role","Today, the ops role is about far more than just keeping the lights on. Here's how modern DevOps practices are expanding ops' responsibilities.","https://about.gitlab.com/blog/how-modern-devops-practices-are-changing-the-operations-role","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How modern DevOps practices are changing the operations role\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-10-19\",\n      }",{"title":4709,"description":4710,"authors":4714,"heroImage":4180,"date":4715,"body":4716,"category":769,"tags":4717},[851],"2022-10-19","\nRemember NoOps, the idea that automation would eliminate the operations role completely? Fast forward a few years and the idea of NoOps today seems almost laughable. 
In today’s modern DevOps teams it’s safe to say it’s really _“AlltheOps_,” at least based on the results of our [2022 Global DevSecOps Survey](/developer-survey/).\n\n## An expanding role\n\n[No DevOps job is static](/blog/the-changing-roles-in-devsecops/), but ops pros are experiencing truly dramatic changes to their work lives. In fact, ops pros reported seven areas of responsibility now added to their plates thanks to modern DevOps practices:\n\n- Managing the cloud\n- Managing the hardware/infrastructure\n- Maintaining the toolchain\n- DevOps coaching\n- Responsibility for automation\n- Overseeing all compliance and audits\n- Platform engineering\n\nManaging the cloud and hardware/infrastructure were the two tasks most frequently named, and they were split nearly evenly down the middle, with roughly 50% of ops pros focusing on one or the other task primarily. Another area – [maintaining the toolchain](/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer/) – is apparently now a job shared with developers, as devs also told us they were spending more time on toolchain maintenance and integration than ever before. That’s not surprising: 44% of teams reported they use between two and five tools, while 41% use between six and 10 tools. That’s a lot of tools, which is clearly one reason for the added ops support. \n\nCompliance and audits are another “new to Ops” area of focus, and this added emphasis comes at a time when organizations everywhere are trying to avoid security breaches with [an increased focus on compliance](/blog/the-importance-of-compliance-in-devops/). It’s a time-consuming process: The majority of ops pros told us they spend between one-quarter and half their time [on audits and compliance](/blog/what-you-need-to-know-about-devops-audits/), a 15% increase since 2021. Almost 25% of ops pros spend between half and three-quarters of their time on these tasks. 
\n\n## Keeping the balls in the air\n\nThe rising use of [DevOps platforms](/topics/devops-platform/) (75% of our respondents said their organizations already use a DevOps platform or plan to add one this year) is driving operations team members toward [platform engineering](/topics/devops/what-is-a-devops-platform-engineer/). Operations pros are also doubling down on tasks that were likely more informal in the past: DevOps coaching and responsibility for automation. The focus on automation is clearly paying off: In 2022, just shy of 25% of ops pros said their modern DevOps practices were fully automated, up 5 points from 2021 and nearly 17 points from 2020. All told, 68% of ops pros said their DevOps teams were “completely” or “mostly” automated.\n\nAnd while ops is adding new responsibilities thanks to modern DevOps, developers are picking up tasks that have traditionally belonged to operations:\n\n- Nearly 77% of devs can provision their own environments.\n- Roughly 38% of developers instrument the code.\n- Another 38% monitor and respond to the infrastructure. \n- 36% of devs said they’re on-call for in-app production alerts.\n\nThe role-swapping doesn't stop there: Nearly 50% of ops pros said they're solely responsible for security on their DevOps teams, up 20% from last year. To put that into perspective, 53% of security respondents told us they felt security was *everyone's* responsiblity.\n\n## Ops, modern DevOps, and TMI\n\nOps pros’ new roles have created some surprising by-products, namely loads of data that teams aren’t necessarily set up to manage effectively. In fact, many of today’s operations teams have a “too much information” problem. A full 39% of ops pros said the DevOps data they need exists but accessing and managing it is difficult. Another 27% said they’re “overwhelmed” by the amount and scope of the data while 14% don’t know what data they need or say their organizations don’t track it. 
Less than 20% of ops pros say they have the data they need and it’s easy to work with.\n\nHow do you see the ops role changing in the modern DevOps world? Let us know in the comments.\n",[9,681,1515],{"slug":4719,"featured":6,"template":686},"how-modern-devops-practices-are-changing-the-operations-role","content:en-us:blog:how-modern-devops-practices-are-changing-the-operations-role.yml","How Modern Devops Practices Are Changing The Operations Role","en-us/blog/how-modern-devops-practices-are-changing-the-operations-role.yml","en-us/blog/how-modern-devops-practices-are-changing-the-operations-role",{"_path":4725,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4726,"content":4732,"config":4738,"_id":4740,"_type":14,"title":4741,"_source":16,"_file":4742,"_stem":4743,"_extension":19},"/en-us/blog/how-orange-uses-gitlab-ci-cd-for-modern-devops",{"title":4727,"description":4728,"ogTitle":4727,"ogDescription":4728,"noIndex":6,"ogImage":4729,"ogUrl":4730,"ogSiteName":670,"ogType":671,"canonicalUrls":4730,"schema":4731},"How Orange made a first step toward CI/CD standardization with GitLab","Find out how Orange made a first step toward CI/CD standardization with GitLab","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682084/Blog/Hero%20Images/oranges.jpg","https://about.gitlab.com/blog/how-orange-uses-gitlab-ci-cd-for-modern-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Orange made a first step toward CI/CD standardization with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pierre Smeyers\"}],\n        \"datePublished\": \"2021-07-29\",\n      }",{"title":4727,"description":4728,"authors":4733,"heroImage":4729,"date":4735,"body":4736,"category":1318,"tags":4737},[4734],"Pierre Smeyers","2021-07-29","\n\nCI/CD is a foundational piece to modern software development. 
It's a major brick in the [DevOps](/topics/devops/) \"Automation\" pillar and every company involved in IT has to implement CI/CD or they're already quite far behind the curve.\n\nBut [implementing CI/CD](/topics/ci-cd/) can be challenging especially for growing or large companies. Some of those challenges include:\n\n* DevOps expertise and technical skills\n* [DevSecOps](/topics/devsecops/)\n* Standardization\n\n## Three key hurdles that come with implementing CI/CD\n\nThis blog post unpackes these challenges and explains how [Orange](https://orange.com/) overcame them using GitLab.\n\n### DevOps and technical skills\n\nNo matter which CI/CD tool you're using, it requires some amount of expertise to implement it right.\n\n**DevOps expertise** is important because your team needs some experience with Git workflows, deployment, environments, secrets management, etc. You can't ask a complete rookie to implement a state-of-the art DevOps pipeline without expertise or experience.\n\n**Technical skills** are also important for implementing CI/CD. Any professional can tell you that getting started tutorials are insufficient. We inevitably need advanced functions, and that requires knowing the tool pretty well. This is particularly true with GitLab CI/CD, which is a fantastic functionally rich tool. GitLab CI/CD is constantly evolving, which creates an ongoing burden for projects that want to integrate new tooling as they go.\n\n### DevSecOps\n\nDevOps is all about finding the right balance between shortening the cycle and maximizing your confidence.\n\n[DevSecOps tools](/solutions/security-compliance/) are a keystone in maximizing our confidence because they detect issues with things like security, code quality, and compliance, etc., almost instantly. 
But DevSecOps tools are evolving quickly and today's Docker container scanner tools can be replaced by newcomers in just a few months.\n\nAlso, having each development team in the company choose and integrate various DevSecOps tools doesn't make sense and will be a waste of time and resources. Going this route means most developers won't use any DevSecOps tool because the opportunity cost isn't worth the time and effort.\n\n### Standardization\n\nThe last challenge in implementing CI/CD at a large company is the lack of standardization.\n\nGitLab CI/CD - as with most other CI/CD tools - is mainly a sophisticated scheduler, allowing a team to define technical tasks and their sequence. GitLab CI/CD cares little about the nature of these tasks, and does not give any clues as to the \"right\" way to build a DevOps pipeline. The consequence of this is that every company, project team, and developer will implement a DevOps pipeline their own way, in a manner that is probably significantly different from their colleagues'.\n\nAs a lifelong Javaist, I like to compare the current situation in CI/CD with what was the Java build in the pre-Maven era. Back then, we used non-structuring tools such as [Make](https://en.wikipedia.org/wiki/Make_(software)) or [Apache Ant](https://en.wikipedia.org/wiki/Apache_Ant). Each project created its own build system, adopted its own conventions, code, and resource files structure. In short, it was a happy mess with everyone reinventing the wheel. When joining another project, a user had to ask: \"How does the build work here?\".\n\nIn 2004, Maven was released (and Gradle three years later). For a while, there were heated debates between the proponents of standardization and the defenders of expertise and customization. Today it would not occur to anyone to build a Java project with anything other than Maven or Gradle. Now, if I join a project developed in Java, I will immediately know how files are organized and how the project is built. 
Java build is now standardized.\n\nI believe that CI/CD ought to go a similar route: tools should offer a more opinionated framework so that CI/CD too becomes a non-topic.\n\n## How a single GitLab feature changed the game for Orange\n\nAt Orange - probably like many other companies involved in IT - we struggled with the three challenges summarized above.\n\nThen in January 2019, the [`include`](https://docs.gitlab.com/ee/ci/yaml/#include) feature was released in the [Community Edition (version 11.7) of GitLab](/releases/2019/01/22/gitlab-11-7-released/):\n\n```yaml\ninclude:\n  - project: a-path/to-some-project'\n    file: '/very-smart-template.yml'\n```\n\nThis feature finally gave us the ability to develop and share state-of-the-art GitLab CI/CD pipeline templates!\n\nSo that's what we did.\n\nFor two years, a handful of DevOps/security/languages/cloud experts developed ready-to-use GitLab CI/CD pipeline templates. This personal initiative quickly became recognized as an internal project, attracting more users and contributors, bringing the community to 1000+ members as of June 2021, and leveraging about 30 available templates. 
The visible effect of this increasing adoption is the beginning of a **CI/CD standardization at Orange**.\n\nWe were so happy with our results and convinced that it's a general need that we open sourced our templates under the name [\"to be continuous\"](https://to-be-continuous.gitlab.io/doc/).\n\n![To be continuous logo](https://about.gitlab.com/images/blogimages/orange_tbc.jpg){: .shadow}\nThe \"to be continuous\" logo.\n{: .note.text-center}\n\n### What is in *to be continuous*?\n\nFor now, *to be continuous* has 26 templates of six kinds:\n\n* **Build & Test**: Angular, Bash, Go, Gradle, Maven, MkDocs, Node.js, PHP, Python\n* **Code Analysis**: Gitleaks, SonarQube\n* **Packaging**: Docker\n* **Infrastructure** (IaC): Terraform\n* **Deploy & Run**: Ansible, Cloud Foundry, Google Cloud, Helm, Kubernetes, OpenShift, S3 (Simple Storage Service)\n* **Acceptance**: Cypress, Postman, Puppeteer, Robot Framework, SSL test, k6\n* **Others**: semantic-release\n\n*To be continuous* is thoroughly documented:\n\n* [Basic notions and philosophy](https://to-be-continuous.gitlab.io/doc/understand/)\n* [General usage principles](https://to-be-continuous.gitlab.io/doc/usage/)\n* How to use *to be continuous* in a [self-managed instance of GitLab](https://to-be-continuous.gitlab.io/doc/self-managed/basic/)\n* Every template also has [its own documentation](https://to-be-continuous.gitlab.io/doc/ref/angular/)\n\nTo get started quickly, *to be continuous* provides an [interactive configurer](https://to-be-continuous.gitlab.io/kicker/) (aka *\"kicker\"*) that allows generating the `.gitlab-ci.yml` file simply by selecting the technical characteristics of your project.\n\nFinally, *to be continuous* exposes several [example projects](https://gitlab.com/to-be-continuous/samples), illustrating how to use the templates in production-like projects, combining multiple templates.\n\n### A quick glance at *to be continuous*\n\nThere are tons of resources to get started with *to be 
continuous*. But here is a quick example to get the taste of it.\n\nHere is the `.gitlab-ci.yml` file for a project:\n\n* Developed in Java 11 (built with Maven)\n* Code analysis with SonarQube\n* Packaged as a Docker image\n* Deployed to Kubernetes cluster\n* GUI tests with Cypress\n* API tests with Postman (Newman)\n\n```yaml\ninclude:\n  # Maven template\n  - project: \"to-be-continuous/maven\"\n    ref: \"1.4.2\"\n    file: \"templates/gitlab-ci-maven.yml\"\n  # Docker template\n  - project: \"to-be-continuous/docker\"\n    ref: \"1.2.0\"\n    file: \"templates/gitlab-ci-docker.yml\"\n  # Kubernetes template\n  - project: \"to-be-continuous/kubernetes\"\n    ref: \"1.2.0\"\n    file: \"templates/gitlab-ci-k8s.yml\"\n  # Cypress template\n  - project: \"to-be-continuous/cypress\"\n    ref: \"1.2.0\"\n    file: \"templates/gitlab-ci-cypress.yml\"\n  # Postman template\n  - project: \"to-be-continuous/postman\"\n    ref: \"1.2.0\"\n    file: \"templates/gitlab-ci-postman.yml\"\n\n# Global variables\nvariables:\n  # Explicitly define the Maven + JDK version\n  MAVEN_IMAGE: \"maven:3.8-openjdk-11\"\n\n  # Enables SonarQube analysis (on sonarcloud.io)\n  SONAR_URL: \"https://sonarcloud.io\"\n  # organization & projectKey defined in pom.xml\n  # SONAR_AUTH_TOKEN defined as a secret CI/CD variable\n\n  # Kubernetes\n  K8S_KUBECTL_IMAGE: \"bitnami/kubectl:1.17\" # client version matching my cluster\n  K8S_URL: \"https://k8s-api.my.domain\" # Kubernetes Cluster API url\n  # K8S_CA_CERT & K8S_TOKEN defined as secret CI/CD variables\n  # enable review, staging & prod\n  K8S_REVIEW_SPACE: \"non-prod\"\n  K8S_STAGING_SPACE: \"non-prod\"\n  K8S_PROD_SPACE: \"prod\"\n\n  # Cypress & Postman: enable test on review aps\n  REVIEW_ENABLED: \"true\"\n\n# Pipeline steps\nstages:\n  - build\n  - test\n  - package-build\n  - package-test\n  - review\n  - staging\n  - deploy\n  - acceptance\n  - publish\n  - production\n  ```\n\nThis fully declarative file produces the following 
**development pipeline** (any feature branch):\n\n![Screenshot of development pipeline](https://about.gitlab.com/images/blogimages/orange_development_pipeline.jpg){: .shadow}\n\n... and the following **production pipeline** (`master` or `main` depending on your preferences):\n\n![Screenshot of production pipeline](https://about.gitlab.com/images/blogimages/orange_production_pipeline.jpg){: .shadow}\n\nAlthough they look pretty much the same, they aren't:\n\n* While the production pipeline privileges sureness and completeness, development pipelines privilege short cycles and developer experience. While code analysis jobs and acceptance tests are blocked in production, they only generate a non-blocking warning in development in case of failure.\n* The production pipeline deploys to the staging environment before deploying to production (provided acceptance tests are green). Development pipelines may deploy to a dynamically generated review environment (optional).\n* Developers may prefer to use a single integration environment (associated with the develop branch) instead of one review app per feature branch. 
The default behavior of the integration pipeline is much closer to the production one.\n\nWhat you can't see:\n\n* Java unit tests are automatically executed, their report is [integrated to GitLab](https://docs.gitlab.com/ee/ci/unit_test_reports.html), with [code coverage](https://docs.gitlab.com/ee/ci/yaml/#coverage) too.\n* SonarQube integration automatically uses [branch analysis](https://docs.sonarqube.org/latest/branches/overview/) or [MR analysis](https://docs.sonarqube.org/latest/analysis/pull-request/) (with MR decoration) depending on the context.\n* Kubernetes environments are obviously [integrated to GitLab](https://docs.gitlab.com/ee/ci/environments/) too.\n* [Review apps](https://docs.gitlab.com/ee/ci/review_apps/index.html) can be cleaned-up manually or automatically on branch deletion.\n* Cypress and Postman tests reports are also [integrated to GitLab](https://docs.gitlab.com/ee/ci/unit_test_reports.html).\n* Docker uses the Kaniko build by default but it might be configured to use Docker-in-Docker instead. It uses the GitLab registry by default but can be configured to use any other registry.\n* Each template integrates the most appropriate DevSecOps tools: [kube-score](https://kube-score.com/) for Kubernetes, [hadolint](https://github.com/hadolint/hadolint) for Docker, [OWASP Dependency-Check](https://jeremylong.github.io/DependencyCheck/) for Maven, among others.\n* All those templates combine themselves gracefully. For example, Kubernetes may simply deploy the Docker image built upstream; Cypress and Postman tests automatically test the application deployed in the upstream jobs; Kubernetes could be replaced with OpenShift, GCP or any other supported hosting technology, it would behave the same.\n\n## Contribute to *to be continuous*\n\n[to be continuous](https://to-be-continuous.gitlab.io/doc) is out and eagerly waiting for users and contributors.\n\nHave a look and share your feedback. 
Whether you like our choices or not, we want to hear from you. Your inputs are even more valuable to help us improve *to be continuous* and cover as many use cases as possible.\n\nBut anyway, never forget this: [`include`](https://docs.gitlab.com/ee/ci/yaml/#include) is undoubtedly the feature that makes CI/CD standardization possible in your company (and beyond).\n\nCover image by [Graphic Node](https://unsplash.com/@graphicnode) on [Unsplash](https://unsplash.com/photos/yi1YB_FubH8)\n{: .note}\n",[9,682,976],{"slug":4739,"featured":6,"template":686},"how-orange-uses-gitlab-ci-cd-for-modern-devops","content:en-us:blog:how-orange-uses-gitlab-ci-cd-for-modern-devops.yml","How Orange Uses Gitlab Ci Cd For Modern Devops","en-us/blog/how-orange-uses-gitlab-ci-cd-for-modern-devops.yml","en-us/blog/how-orange-uses-gitlab-ci-cd-for-modern-devops",{"_path":4745,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4746,"content":4752,"config":4757,"_id":4759,"_type":14,"title":4760,"_source":16,"_file":4761,"_stem":4762,"_extension":19},"/en-us/blog/how-smbs-can-save-with-gitlabs-devops-platform",{"title":4747,"description":4748,"ogTitle":4747,"ogDescription":4748,"noIndex":6,"ogImage":4749,"ogUrl":4750,"ogSiteName":670,"ogType":671,"canonicalUrls":4750,"schema":4751},"How SMBs can save with the GitLab DevOps Platform","Use our ROI Calculator to understand how a DevOps platform saves money.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667966/Blog/Hero%20Images/global-compensation-calculator-iteration.jpg","https://about.gitlab.com/blog/how-smbs-can-save-with-gitlabs-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How SMBs can save with the GitLab DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-09-08\",\n      
}",{"title":4747,"description":4748,"authors":4753,"heroImage":4749,"date":4754,"body":4755,"category":769,"tags":4756},[810],"2022-09-08","\n\nMigrating from a complex and costly DevOps toolchain to The One DevOps Platform from GitLab can not only save a small and medium-sized business (SMB) from an inefficient workload, it can result in a big financial savings, too.\n\nAnd that savings could mean the difference between an [SMB failing and thriving](/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform/) in a cutthroat and unpredictable market. That’s right… GitLab’s end-to-end platform can turn IT into a business driver that speeds software creation, boosting competitiveness and pulling in more revenue. \n\nThat’s key for SMBs, which have small IT teams or maybe even a team of one. That means there are fewer hands to do the work, and likely less financial resources. SMBs might also have a harder time hiring general IT people who also can develop and deploy code. The benefits of a single DevOps platform help solve several SMB issues.\n\n“Where [migration is an investment](https://learn.gitlab.com/smbmigrationguide/migratedevopssmb) in time and change, it’s an investment that will pay a lot of dividends in time and money savings,” says [Brendan O’Leary](https://gitlab.com/brendan), staff developer evangelist at GitLab. “If it’s done right, the ROI will be very quick. You can get rid of all those other tools, while speeding up your ability to iterate and serve your customers.”\n\n## Use an ROI calculator\n\nWith so many factors to consider, how can IT managers measure potential savings? \n\nGitLab can help with that. Check out our [ROI Calculator](https://about.gitlab.com/calculator/roi/), which can help estimate the financial benefits an SMB could realize by moving to GitLab from their DIY DevOps toolchain.\n\nGetting rid of a tangle of disparate tools means freeing up money spent on licensing fees, as well as on updating and maintenance. 
It also means freeing up all the time IT people spend context switching between these tools. And don’t forget The One DevOps Platform is going to help teams develop and deploy faster and more efficiently, [making SMBs more nimble and competitive](/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform/). \n\n## How to save with GitLab\n\nLet’s look at [how an SMB can save money](https://cdn.pathfactory.com/assets/10519/contents/427544/b901b768-7b0e-4590-b00e-047a80536cdb.pdf) by shifting from a complex toolchain to GitLab’s DevOps Platform:\n\n- License fees are obvious costs that need to be considered when trying to calculate ROI. \n\n- The cost of maintaining the software over time also needs to be factored in. \n\n- Consider how much time and energy is spent on tool upgrades, security patching, and monitoring the performance and overall availability of a multitude of tools.\n\n- Because GitLab’s platform speeds development and deployment, productivity increases and that propels revenue and opportunities to grow the company.\n\n- A complicated toolchain that has users continually jumping between tools and switching interfaces creates a chaotic environment that requires constant management, tweaking, updating, and stitching. That means IT is managing the toolchain instead of actually developing and delivering the code that drives the bottom line. Software isn’t efficiently created in a chaotic environment.\n\n- IT is wasting time, energy, and money getting up to speed on each tool. This goes for every new person who has to learn each tool, instead of a single application.\n\n- SMBs find new customers – and the revenue they bring – by creating software that satisfies customers’ needs. 
You can do that more quickly with a DevOps platform.\n\n- Because GitLab’s DevOps Platform enables companies to develop and deploy more securely, more quickly, and with less hands-on work, SMBs are more able to change on a dime to meet or get ahead of new demands and even new competitors.\n\n- If an IT team is spending time on the care and feeding of a toolchain instead of doing interesting software development, it can cause stress and job dissatisfaction, which could lead to problematic turnover.\n\n- The One DevOps Platform naturally pushes security left so it’s automatically integrated into every step of the development lifecycle. Detecting errors early in the process is much cheaper and less time consuming than detecting them in production. \n\nThat’s a lot of ways to save money and earn extra revenue. And all of that can be done with one single license, one permission model, and one interface, giving teams the time and resources to focus on creating business value instead of managing a toolchain.\n\n## Drilling down on ways to save\n\nGitLab’s DevOps Platform allows teams to move from, or avoid, that often complex and confusing multitude of tools by using a single, complete software development ecosystem. An SMB may be small enough that it hasn’t amassed a complex toolchain – yet. But it will only grow more unwieldy as the company grows. Now is the time to adopt a single platform and avoid that problem all together. \n\n\"It enables us to write better software more efficiently,” said Dorian de Koning, DevOps lead at [Weave](https://weave.nl), a software technology developer based in The Netherlands. 
“We went from single manual deployment approximately every two weeks to tens of deployments a day.\"\n",[9,793,936],{"slug":4758,"featured":6,"template":686},"how-smbs-can-save-with-gitlabs-devops-platform","content:en-us:blog:how-smbs-can-save-with-gitlabs-devops-platform.yml","How Smbs Can Save With Gitlabs Devops Platform","en-us/blog/how-smbs-can-save-with-gitlabs-devops-platform.yml","en-us/blog/how-smbs-can-save-with-gitlabs-devops-platform",{"_path":4764,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4765,"content":4771,"config":4777,"_id":4779,"_type":14,"title":4780,"_source":16,"_file":4781,"_stem":4782,"_extension":19},"/en-us/blog/how-start-ospo-ten-minutes-using-gitlab",{"title":4766,"description":4767,"ogTitle":4766,"ogDescription":4767,"noIndex":6,"ogImage":4768,"ogUrl":4769,"ogSiteName":670,"ogType":671,"canonicalUrls":4769,"schema":4770},"Start an open source center of excellence in 10 minutes using GitLab","Launch your own open source program office using the OSPO Alliance's tools on GitLab","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682593/Blog/Hero%20Images/opensign.jpg","https://about.gitlab.com/blog/how-start-ospo-ten-minutes-using-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Start an open source center of excellence in 10 minutes using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Boris Baldassari\"}],\n        \"datePublished\": \"2023-01-30\",\n      }",{"title":4766,"description":4767,"authors":4772,"heroImage":4768,"date":4774,"body":4775,"category":791,"tags":4776},[4773],"Boris Baldassari","2023-01-30","\nNow that open source has finally become a mainstream topic of conversation in the software industry, many organizations are increasingly curious about best practices for consuming, using, managing, and contributing to open source software projects. 
Open source software can seem alien and intimidating for organizations unfamiliar with it, and participating meaningfully and effectively in the open source ecosystem can be challenging.\n\nOrganizations especially serious about working in open source have formed [open source program offices](https://opensource.com/business/16/5/whats-open-source-program-office) (OSPOs) to spearhead their efforts. These offices are centers of excellence for an organization's ongoing work in open source. They help the organization realize the benefits of working with open source communities to accelerate innovation and build more secure tools.\n\nPerhaps your organization is considering establishing an OSPO. If it is, you likely have questions about how to get started – and especially about the best ways to help your organization become a valuable participant in the open source ecosystem.\n\nThe [OSPO Alliance](https://ospo.zone/) can help. Formed in 2021, the OSPO Alliance connects [experienced open source practitioners](https://ospo.zone/membership/) with organizations in need of seasoned guides to the open source world. 
Since the organization's founding, its members have composed a corpus of best open source practices called the [Good Governance Initiative Handbook](https://ospo.zone/ggi/), which explores various legal, cultural, and strategic considerations organizations face when working with open source software (and, naturally, the handbook itself is openly licensed, so anyone can contribute to it).\n\nTo celebrate the launch of the GGI Handbook Version 1.1, the OSPO Alliance went a step further: We have released the [MyGGI project](https://gitlab.ow2.org/ggi/my-ggi-board), which allows organizations to quickly create the infrastructure for their own open source program offices using GitLab.\n\nNow, let's look at what the MyGGI project can help your organization accomplish, including how to use the tool to establish an OSPO built on GGI principles — in only 10 minutes.\n\n## Working with the GGI Handbook\n\nThe GGI Handbook defines 25 activities, or best practices, organized according to various goals an organization may seek to accomplish with open source. Examples of activities include recommendations like \"Manage open source skills and resources,\" \"Manage software dependencies,\" \"Upstream first,\" or \"Engage with open source projects.\" Each of these activities, then, has a corresponding description and rationale, and the handbook provides resources, tools, and hints for successfully implementing them.\n\nActivites are intentionally generic and must be adapted to your organization's specific, unique, local context. The GGI Hanbook offers tools for doing this, too: scorecards. Scorecards allow you to assess your organization's engagement in and progress with various open source best practices.\n\nSo working with the GGI Handbook in your organization might look something like this:\n\n1. 
Evaluate the open source-related activities the handbook proposes and remove those that don't fit your specific context (maybe some activities will require a bit of adptation to be more relevant to the domain, while some others may just be discarded).\n1. Identify the activities that would be most beneficial to reaching your organization's goals in engaging with open source.\n1. Construct an Agile-like, iterative process for working on a small set of these activities. Do this in the form of sprints by tracking your progress with scorecards, and adapt the activity to your local context, team cultures, and available resources as you go.\n1. At the end of each iteration, review the activities your teams have completed, select a new scope for improvement, and repeat the process.\n\nThe MyGGI project provides a push-button infrastructure for doing this work. Next, let's examine how to deploy it on GitLab.\n\n### Deploying the GGI Handbook on GitLab\n\nThe OSPO Alliance wanted to provide a quick and straightforward way for organizations to establish their own open source program activities using a dashboard, so they can start implementing the GGI Handbook's methods without delay. We didn't want to reinvent the wheel with some heavy custom tooling. Instead, we decided to build the project using tools already available to us. We had already used GitLab issues to model activities during the early stages of handbook development, so reusing this GitLab feature made most sense. By simply adding some scripting to automate the initialization of activities and updating a static website on GitLab Pages, we were able to launch the project so others could easily deploy it in their own GitLab instances.\n\nInstructions for deploying the program are available in the project's [README](https://gitlab.ow2.org/ggi/my-ggi-board/-/blob/main/README.md). Let's review them here and start your own OSPO together.\n\nFirst, we need to create a new project on our GitLab instance. 
Select `Import project`, then `From repository by URL`. \n\nNext, we will need to provide a remote URL. Copy the existing MyGGI project by using the [URL](https://gitlab.com/gitlab-com/marketing/community-relations/open-source-program/gitlab-open-source-partners/publications-and-presentations/-/tree/main) `[https://gitlab.ow2.org/ggi/my-ggi-board.git](https://gitlab.ow2.org/ggi/my-ggi-board.git)`.\n\nThen we will give our project a unique name and choose a visibility level. Here's an example of how it might look:\n\n![Repository by URL](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-1.png){: .shadow}\n\nWhen you have configured your desired settings, click `Create project` to continue.\n\nOur next step is to configure access privileges. Go to `Project Settings > Access Tokens` and create a `Project Access Token` with `API` privilege and `Maintainer` role. The project's scripts will use these to create the issues and generate the static website dashboard for your OSPO.\n\nWhen the token is created, copy it to a safe place, as **you will never be able to see it again**. Note that some GitLab instances prefer to disable the Project Access Token feature in favor of Personal Access Tokens. This is perfectly okay; the preference won't affect the deployment of this project (see the instructions for more details).\n\nHere's an example of what you will see at this stage:\n\n![Project access tokens](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-2.png){: .shadow}\n\nWe will then provide this access token to the pipelines and scripts by creating a CI/CD environment variable. Go to `Project Settings` and then `CI/CD`. Scroll to the `Variables` section and add a new variable with name `GGI_GITLAB_TOKEN`. Input the access token you created in the last step as the value. 
Here's an example:\n\n![Add variable screen](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-3.png){: .shadow}\n\nWe can now execute the pipeline to begin the process of creating your OSPO infrastructure. Go to `CI/CD`, then `Pipelines`, and click on `Run pipeline`. After a couple of minutes, the pipeline should finish and the website will deploy. You will see something like this when the pipeline finishes:\n\n![Pipeline passed screen](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-4.png){: .shadow}\n\nInfrastructure for your open source program office is now ready!\n\n### Using the tools\n\nThe MyGGI project creates a set of 25 activities, along with a nice project board to help you visualize them:\n\n![Project board](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-5.png){: .shadow}\n\nUsers can click on specific activities (rendered as issues) to read the description of the activity, understand the tools and resources that might help them complete it, and begin completing relevant scorecards. Users can also define their own perspectives on the activities, as they see them from the organization's specific context. Then they can create tasks to narrow the scope of each activity so they can iterate on it and track progress. \n\nTheir work is displayed on a static website hosted on GitLab Pages and updated nightly according to the organization's progress on various activities and tasks. This web page is especially useful to present the program and its day-to-day evolution to the organization (or the world); participants, stakeholders, and executives can review it to learn more about the various initiatives, see what work is underway, and track the overall development of the organization's open source program office. 
The initial website looks like this:\n\n![Welcome screen of website](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-6.png){: .shadow}\n\n### Running your OSPO\n\nSelecting an open source program manager to oversee the work on the project boards is beneficial at this step. That person will:\n\n- Assign issues to team members to start working on new activities, create scorecards to track the work and associated tasks, and label them as \"In Progress\" instead of \"Not Started\".\n- Oversee the evolution of the work as it moves through various iterations, completing the scorecards with local resources and information, and closing issues as tasks are complete.\n- Ensure that issues keep making progress and, as team members complete them, assign new ones.\n\nAs changes occur in both the project and its issues, your OSPO's static website will regularly update to reflect the current status of activities, tasks, and the overall progress. After some time, for instance, the dashboard may look like this:\n\n![Dashboard with current status](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-9.png){: .shadow}\n\nYou're now on your way to establishing your organization's open source program office. 
Don't hesitate to connect with the [OSPO Alliance](https://ospo.zone/) for help and support as you continue your journey!\n\n_Boris Baldassari is an open source consultant at the Eclipse Foundation Europe, and an active contributor to the OSPO Alliance._\n\nCover image by [Clay Banks](https://unsplash.com/@claybanks?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com)\n{: .note}\n\n",[682,978,9],{"slug":4778,"featured":6,"template":686},"how-start-ospo-ten-minutes-using-gitlab","content:en-us:blog:how-start-ospo-ten-minutes-using-gitlab.yml","How Start Ospo Ten Minutes Using Gitlab","en-us/blog/how-start-ospo-ten-minutes-using-gitlab.yml","en-us/blog/how-start-ospo-ten-minutes-using-gitlab",{"_path":4784,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4785,"content":4790,"config":4795,"_id":4797,"_type":14,"title":4798,"_source":16,"_file":4799,"_stem":4800,"_extension":19},"/en-us/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform",{"title":4786,"description":4787,"ogTitle":4786,"ogDescription":4787,"noIndex":6,"ogImage":928,"ogUrl":4788,"ogSiteName":670,"ogType":671,"canonicalUrls":4788,"schema":4789},"How ten steps over ten years led to the DevOps Platform","It's been ten years since the first commit to GitLab! Here's a look at ten critical choices that shaped us.","https://about.gitlab.com/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How ten steps over ten years led to the DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2021-10-11\",\n      }",{"title":4786,"description":4787,"authors":4791,"heroImage":928,"date":4792,"body":4793,"category":679,"tags":4794},[766],"2021-10-11","\nThe first commit to GitLab (!!) was 10 years ago. 
Today, it’s an entirely different world: DevOps is increasingly mainstream and there's a DevOps platform revolution.\n\nWe didn’t have a crystal ball back then, but we did try to create a product, a culture and a company that reflected what we thought mattered most. Here’s a look back at 10 key decisions we made that still have impact:\n\n1. Work in parallel: When we started, it was clear the waterfall method of software development - where one stage waited on another stage and nothing happened independently - slowed everything. We decided right from the beginning that a “work in parallel” philosophy would be fundamental to our culture and our behaviors. Also, such a philosophy naturally supported everything else we did, including bringing CI and CD together and operating as an all-remote company. Working in parallel is also vital to success with DevOps.\n\n2. CI, meet git: To merge dev and ops you have to merge development and operations. We [weren’t really sure](/blog/gitlab-hero-devops-platform/) bringing CI together with a git repository was the right step to take, but we tried it and [it worked](/blog/beginner-guide-ci-cd/). Now, developers expect CI to be perfectly integrated into their daily work, and, more and more, they are using a DevOps platform to centralize CI and CD.\n\n3. Cloud native: We’ve been talking about Kubernetes and the options made possible by cloud-native development since [2017](/blog/containers-kubernetes-basics/). We’re true believers in supporting the ability to embrace cloud-native technology and patterns.  The concept of cloud native enables teams to deliver better software faster, break down their applications into microservices and focus engineering time on delivering value to their customers - not on maintaining brittle infrastructure.\n\n4. The mighty merge request: We doubled down on the idea of a merge request, making it the hub of absolutely everything. 
Merge requests are not only the gateway to production, but all the other critical steps, such as security checks, which can be found in there as well. Plus, the merge request serves as a living record of changes and is essential for [better code review](/blog/iteration-and-code-review/).\n\n5. Developer-first security: For developers to have ownership of security, they need scanning early in the process and results in their workflow. That’s why [developer-first security](/topics/devsecops/what-is-developer-first-security/) is at the heart of our DevOps Platform.\n\n6. A complete definition of security: Security isn’t a “one and done” effort and our DevOps Platform enables us to offer a broad spectrum of security scans that goes far beyond just SAST and DAST. From scanning for dependencies or looking at containers, we cover all the security bases in a single platform.\n\n7. All remote, all the time: With no corporate headquarters and employees in 65 countries and regions (as of October 2021), we’re [all remote](https://handbook.gitlab.com/handbook/company/culture/all-remote/guide/) and proud of it. This decision transformed into a corporate value that has influenced our choices and behaviors. \n\n8. Asynchronous communication: A natural result of being remote, [asynchronous communication](https://handbook.gitlab.com/handbook/company/culture/all-remote/asynchronous/) is something we take seriously. We’re a [“handbook first”](https://handbook.gitlab.com/handbook/company/culture/all-remote/handbook-first/) organization, meaning we write everything down so information is as self-service as possible. We also carefully consider what time is spent in meetings, limiting their frequency and regularly asking ourselves if “asynchronous” is better. This has allowed us to successfully have employees in nearly every time zone around the world and follow the working in parallel philosophy.\n\n9. 
Visibility: Planning is critical, but it’s equally important to pair it with visibility so everyone knows what’s happening and where it’s happening. Giving context for the original plan to all team members throughout the DevOps lifecycle, how the plan has changed, and what the implementation looks like in the end is a critical advantage to a single DevOps platform.  Without this, time is wasted trying to update multiple systems with issue status, or having conflicting information in independent tools. \n\n10. Measure the results: We firmly believe it’s important to know how the stages of the SDLC are going, in detail. After all, if you can’t measure your results, how can you know things are moving in the right direction? Many DevOps teams don’t or can’t measure, but that can make it difficult to convince management of the value of the methodology. A DevOps platform makes measurement easy.\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [Making the case for a DevOps platform: What data and customers say](/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)\n\n- [It's time to build more accessible software. 
A DevOps platform can help](/blog/how-the-devops-platform-makes-building-accessible-software-easier/)\n",[2534,9,728],{"slug":4796,"featured":6,"template":686},"how-ten-steps-over-ten-years-led-to-the-devops-platform","content:en-us:blog:how-ten-steps-over-ten-years-led-to-the-devops-platform.yml","How Ten Steps Over Ten Years Led To The Devops Platform","en-us/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform.yml","en-us/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform",{"_path":4802,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4803,"content":4809,"config":4814,"_id":4816,"_type":14,"title":4817,"_source":16,"_file":4818,"_stem":4819,"_extension":19},"/en-us/blog/how-the-devops-platform-makes-building-accessible-software-easier",{"title":4804,"description":4805,"ogTitle":4804,"ogDescription":4805,"noIndex":6,"ogImage":4806,"ogUrl":4807,"ogSiteName":670,"ogType":671,"canonicalUrls":4807,"schema":4808},"It's time to build more accessible software. A DevOps platform can help","Shifting accessibility left can make building accessible products simpler and more efficient. A DevOps platform makes it easier to customize and adjust priorities to suit your business needs.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667281/Blog/Hero%20Images/accessibility.jpg","https://about.gitlab.com/blog/how-the-devops-platform-makes-building-accessible-software-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It's time to build more accessible software. 
A DevOps platform can help\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2021-09-21\",\n      }",{"title":4804,"description":4805,"authors":4810,"heroImage":4806,"date":4811,"body":4812,"category":769,"tags":4813},[2002],"2021-09-21","\n\nThe earlier a feature or process is introduced in the multi-step software development lifecycle (SDLC), the more likely it is to be fully integrated into the product. \n\nIt's well documented how security can [shift left using a DevOps platform](/blog/devops-platform-supply-chain-attacks/), so it's time to make the case that accessbility needs to be thought about earlier (and, clearly, a DevOps platform can facilitate that too). Although there are laws that require applications to meet certain accessibility requirements, which [opens an application up to a broader user base](/blog/how-the-open-source-community-can-build-more-accessible-products/), rarely is accessibility considered a core product requirement. Instead, it is just a test tacked on at the end instead of being built into the DevOps platform process.\n\n**[Learn more about [how the open source development community helps build accessible software](/blog/how-the-open-source-community-can-build-more-accessible-products/)]**\n\n\"The problem really is that accessibility is not usually explicitly defined as a problem,\" said Segun Ola, a frontend web developer at engineering talent finder Andela, during a presentation at accessibility conference [axe-con](https://www.deque.com/axe-con/). 
\"Most of the time, developers go through the product lifecycle and we identify all the other problems with a product or all of the things we want to solve and ignore accessibility for the greater part.\"\n\n## Accessibility in software development: It starts with education\n\nOftentimes, omissions are unintentional and have more to do with a lack of awareness around why accessibility in software development is so important for many people living with disabilities and a key driver of business value. After all, the more accessible your product, the more users can benefit from it.\n\n\"I have met software engineers and designers who did not even know that there's a thing called a screen reader,\" Ola said. \"Just last week, I was reviewing some code and explaining why the code needed to be refactored. A junior engineer asked me ‘what is a screen reader?’ So I had to get on a call with him and show him how screen readers work. And then he asks me, ‘What's the point of a screen reader?’ And I had to tell him: ‘Oh yeah there are people who can't see the way you and I see.’\"\n\nThis is just one example of why having empathy and education around accessibility so important, says [Taurie Davis](/company/team/#tauriedavis), product design manager on Ecosystems at GitLab. Earlier in 2021, the GitLab UX team set a [goal to become a department of accessibility experts and advocates at GitLab](https://gitlab.com/groups/gitlab-org/-/epics/5235) by completing a 26-hour training at [Deque University](https://dequeuniversity.com/) on accessibility in software development.\n\n## Ignoring accessibility? Expect more technical debt\n\nSometimes software companies will see investment in accessibility components for a product as expensive and/or as a trade-off for innovation. 
Development teams that wait until the end of the SDLC to think about accessibility are more likely to have coded components that are inaccessible, only to have to go back and rework them to suit legal accessibility standards. This process can lead to an immense amount of technical debt.\n\n**Take a deep dive into [all aspects of the DevOps platform](/solutions/devops-platform/)**\n\n\"Once a team does start to become educated about accessibility and they have the empathy and have the drive to make the change and start shifting accessibility left it's easy to see all of the debt that you've accrued around accessibility,\" says Taurie. \"It can be really expensive to get yourself out of that debt.\" Taurie points to examples such as having to go back to change variables for color contrast, and ensuring that filtering and tab reordering can be done in a way that screen readers understand it.\n\n\"There are just so many different aspects and elements that could cause you to go back and just rewrite how the entire feature was originally developed and that can affect every aspect of your product,\" she adds.\n\n## Other barriers to implementing accessibility earlier\n\nFor UX designers like [Jeremy Elder](/company/team/#jeldergl), staff product designer on Ecosystems at GitLab, and Taurie, the typical workflow is about testing artifacts and responding to customer feedback, as opposed to thinking proactively about how someone might use the product.\n\n\"It’s more of a softer skill to think through a lot of those abstract ideas and what-ifs upfront rather than just saying, ‘Hey, we need this widget to do XYZ,’\" says Jeremy. \"Instead of asking questions like ‘how might somebody want to use this? How does it fit in their workflow?’. 
That is more inclusive thinking that helps you to do that, but it's harder and not as common.\"\n\n## Building accessible software isn’t just ethical, it drives business value\n\nOftentimes accessibility in software development is framed around building products to better serve people living with disabilities. While this is essential and ethical, accessibility can also be about building software products that can easily adapt to a user’s workflow.\n\n\"It’s more rigor around understanding workflows and how somebody is wanting to use it and less about focusing necessarily on a disability per se, or an outcome,\" says Jeremy. \"You want to think about personas or jobs to be done, not just think about the ultimate task, but how somebody is achieving that task.\"\n\n**[Ten key features](/topics/devops-platform/) of a DevOps platform**\n\nProducts that are customizable and adaptable are more likely to pique the interest of clients who have specific needs (e.g., a screenreader) or workflow preferences (e.g., using a particular type of keyboard).\n\n## What are the solutions?\n\nThe simplest solution to building more accessible software solutions is to think about accessibility at the beginning of the SDLC, rather than waiting until the end. Companies that use a complete DevOps platform like GitLab will find it simpler to take iterative steps toward shifting accessibility left. Need an example? Make accessibility part of the requirements a dev team needs to complete before a particular feature can be considered \"done.\" One way to do this would be to update issue templates and MR templates to ensure an accessibility step is part of the checklist.\n\nWhether it’s security or accessibility, shifting something left is about bringing the conversation to the beginning of the SDLC, something made much more straightforward with a DevOps platform. 
When it comes to accessibility, the more accessible the product is, the broader the pool of users (and future customers) can benefit.\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Making the case for a DevOps platform: What data and customers say](/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)\n\n",[9,728,682],{"slug":4815,"featured":6,"template":686},"how-the-devops-platform-makes-building-accessible-software-easier","content:en-us:blog:how-the-devops-platform-makes-building-accessible-software-easier.yml","How The Devops Platform Makes Building Accessible Software Easier","en-us/blog/how-the-devops-platform-makes-building-accessible-software-easier.yml","en-us/blog/how-the-devops-platform-makes-building-accessible-software-easier",{"_path":4821,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4822,"content":4828,"config":4833,"_id":4835,"_type":14,"title":4836,"_source":16,"_file":4837,"_stem":4838,"_extension":19},"/en-us/blog/how-the-dora-metrics-can-help-devops-team-performance",{"title":4823,"description":4824,"ogTitle":4823,"ogDescription":4824,"noIndex":6,"ogImage":4825,"ogUrl":4826,"ogSiteName":670,"ogType":671,"canonicalUrls":4826,"schema":4827},"How the DORA metrics can help DevOps team performance ","The best DevOps teams measure their results. 
Here's a deep dive into the DORA metrics that matter.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676702/Blog/Hero%20Images/data.jpg","https://about.gitlab.com/blog/how-the-dora-metrics-can-help-devops-team-performance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How the DORA metrics can help DevOps team performance \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aathira Nair\"}],\n        \"datePublished\": \"2022-04-20\",\n      }",{"title":4823,"description":4824,"authors":4829,"heroImage":4825,"date":4830,"body":4831,"category":769,"tags":4832},[995],"2022-04-20","\n\n_Accelerated adoption of the cloud requires tools that aid in faster software delivery and performance measurements.  Delivering visibility across the value chain, the DORA metrics streamline alignment with business objectives, drive software velocity, and promote a collaborative culture._ \n\nSoftware delivery, operational efficiency, quality - there is no shortage of challenges around digital transformation for business leaders. \n\nCustomer satisfaction, a prominent business KPI, has paved the way for experimentation and faster analysis resulting in an increased volume of change in the software development lifecycle (SDLC). Leaders worldwide are helping drive this culture of innovation aligned with organization goals and objectives. However, it is not always about driving the culture alone; it is also about collaboration, visibility, velocity, and quality. \n\nCloud computing and microservices are driving the cloud-first approach for software delivery, helping to scale them independently, and allowing teams to move faster. But, without DevOps, the team doesn’t have the underlying core to move fast efficiently. DevOps has the power to enable the smallest changes that can have great effects. \n\nThis brings us to the question - how do you measure velocity and impact? 
Or how do you assess quality, and ensure that it is not hampered by velocity? The latter would be what is commonly referred to as technical debt.\n\n## A continuous journey needs continuous improvement\n\nAny improvement starts with measurement. Measuring and optimizing DevOps practices improves developer efficiency, overall team performance, and business outcomes. DevOps metrics demonstrate effectiveness, shaping a culture of innovation and ultimately overall digital transformation. In the [Accelerate State of DevOps 2021](https://cloud.google.com/blog/products/devops-sre/announcing-dora-2021-accelerate-state-of-devops-report) report by the DevOps Research and Assessment (DORA) team at Google Cloud, which draws insights from 7 years of data collection and research, four metrics are the key to measure software delivery performance.\n\n## What are these metrics?\n\n- Deployment Frequency\n- Lead time for changes\n- Time to restore service\n- Change failure rate\n\n### Deployment Frequency\n\nLet’s start with the velocity of development. Deployment frequency measures how often the organization deploys code to production or releases it to end users. This metric borrows from lean manufacturing concepts, wherein small multiple batch sizes are the preferred approach for higher efficiency and more rapid adjustments.\n\n### Lead time for changes\n\nNow comes the extent of automation in your processes. Lead time for changes measures the time needed to take a committed code to successfully run in production. This is one of the two metrics with significant variance in the data. \n\n### Time to restore service\n\nThis represents a business' capacity. Time to restore service measures the time needed to restore services to the level they were previously, in case of an incident. Here too we see significant variance in the data.\n\n### Change failure rate\n\nAnd finally, we take a look at quality. 
Changes which cause a failure in the system – a deployment failure, an incident, a rollback or a remedy – all contribute to measuring the change failure rate. \n\n## Driving visibility into the DevOps lifecycle\n\nRecently, Zoopla used DORA metrics to boost deployments and increase automation. Understanding the root cause of their problems helped them make informed adjustments in their process workflows, automation, tools, and more. They recognized the value of using a single platform to overcome roadblocks in velocity and innovation. This brought added visibility into their system which helped improve measurement and analytics. \n\nOur [2021 Global DevSecOps Survey](/developer-survey/) shows engineers are happier when they can focus on innovation and adding value than when maintaining integrations. In fact they would rather focus on higher quality documentation which can further amplify results of investments in DevOps capabilities. Documentation and visibility together drives team performance and competitive advantage. \n\nVisibility driven through [DORA metrics](https://docs.gitlab.com/ee/user/analytics/#supported-dora-metrics-in-gitlab) can uncover bottlenecks such as a dysfunction in code review, allowing management to identify causes of slowdowns in the DevOps lifecycle, and enable engineering leaders to align with business priorities. This delivers continuous improvement and progress towards business goals, promoting a collaborative culture across the organization.\n\nThe team at Zoopla used the GitLab DevOps platform to obtain metrics for deploy frequency, lead time, change fail rate, and time to onboard. \n\nimage_title: ![VSA-DORA](https://about.gitlab.com/images/blogimages/VSA-DORA.png)\n\nThe metrics helped influenced decision making and prioritization at Zoopla. Teams were encouraged to learn from the metrics, and incorporate changes into their planning cycles to keep on the path of continuous improvement. 
They were successful in measuring improvements and building an efficient engineering team that was flexible in responding to business needs. \n\n[Read more on [how Zoopla used DORA metrics for continuous improvement](/blog/how-zoopla-uses-dora-metrics-and-your-team-can-too/) and the [DORA metrics API in GitLab](https://docs.gitlab.com/ee/api/dora/metrics.html#devops-research-and-assessment-dora-key-metrics-api)]\n",[9,813,749],{"slug":4834,"featured":6,"template":686},"how-the-dora-metrics-can-help-devops-team-performance","content:en-us:blog:how-the-dora-metrics-can-help-devops-team-performance.yml","How The Dora Metrics Can Help Devops Team Performance","en-us/blog/how-the-dora-metrics-can-help-devops-team-performance.yml","en-us/blog/how-the-dora-metrics-can-help-devops-team-performance",{"_path":4840,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4841,"content":4846,"config":4852,"_id":4854,"_type":14,"title":4855,"_source":16,"_file":4856,"_stem":4857,"_extension":19},"/en-us/blog/how-to-agentless-gitops-aws",{"title":4842,"description":4843,"ogTitle":4842,"ogDescription":4843,"noIndex":6,"ogImage":928,"ogUrl":4844,"ogSiteName":670,"ogType":671,"canonicalUrls":4844,"schema":4845},"How to Use Push-Based GitOps with Terraform & AWS ECS/EC2","Learn how GitLab supports agentless approach for GitOps on AWS.","https://about.gitlab.com/blog/how-to-agentless-gitops-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use a push-based approach for GitOps with Terraform and AWS ECS and EC2\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-08-10\",\n      }",{"title":4847,"description":4843,"authors":4848,"heroImage":928,"date":4849,"body":4850,"category":791,"tags":4851},"How to use a push-based approach for GitOps with Terraform and AWS ECS and EC2",[1727],"2021-08-10","\n\nIn [part two of our GitOps 
series](/blog/how-to-agentless-gitops-vars/), we described how to use a push-based (or agentless) approach for [GitOps](/topics/gitops/) by using GitLab scripting capabilities as well as integrating infrastructure-as-code tools into GitOps pipelines. In this third blog post, we’ll also dig deep into how to use a push-based approach, but this time our focus will be on the integrations of Terraform, AWS ECS, and AWS EC2 in GitOps flows. This approach may be preferable when using infrastructure components that aren't Kubernetes, such as VMs, physical devices, and cloud-provider services.\n\nSimilar to Ansible – an agentless IT automation solution – Terraform can be leveraged by the scripting capabilities of GitLab to shape your infrastructure. GitLab also provides out-of-the-box integrations with Terraform, such as GitLab-managed Terraform state and Terraform plan reports in merge requests.\n\n## GitOps flows with GitLab and Terraform\n\nIn this section, we explain how to use GitLab and Terraform for a non-Kubernetes GitOps flow and Kubernetes GitOps.\n\n### GitLab and Terraform for non-K8s infrastructure\n\nGitLab leverages Terraform to provision a non-Kubernetes infrastructure component, namely a MySQL database running on AWS.\n\nNote: Ideally, the provisioning of a database should be an on-demand, self-service process that developers can just use. We use this scenario to illustrate a GitOps flow using a non-Kubernetes infrastructure component.\n\n#### How collaboration works in GitLab\n\nSasha, a developer, creates an issue and assigns the issue to Sidney, the database administrator, who then creates a Merge Request (MR) to start her work and invite collaboration with other stakeholders across the organization. Opening the MR automatically creates a feature branch for the GitLab project. Sidney uses Terraform to create an infrastructure-as-code configuration for the database, named `mysqlmain.tf`. The database happens to be an AWS RDS MySQL instance. 
The database Terraform configuration file should look like this:\n\n![Terraform configuration file for MySQL database](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/0-tf-mysqlmain-created.png){: .shadow.small.center.wrap-text}\nTerraform configuration file for MySQL database.\n{: .note.text-center}\n\nTake note of the version of the database (`engine_version`), the database storage (`allocated_storage`), and the embedded database admin user (`username`) and password, in the image above.\n\nAs soon as Sidney adds the file `mysqlmain.tf` file to the feature branch, a pipeline is automatically executed by GitLab in the MR. As part of the review process, a \"Terraform plan\" is executed against the Terraform files and the output is attached to the MR as an artifact:\n\n![Terraform plan output attached to Merge Request](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/1-tf-report-in-MR.png){: .shadow.small.center.wrap-text}\nTerraform plan output attached to MR.\n{: .note.text-center}\n\nIn the picture above, you can see the note \"1 Terraform report was generated in your pipelines\". You can click on the `View full log` button to see the output file of the \"Terraform plan\" command that was run against the new configuration file, as seen below:\n\n![Terraform plan output detailed log view](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/2-tf-plan-output.png){: .shadow.small.center.wrap-text}\nTerraform plan output detailed log view.\n{: .note.text-center}\n\nThe Terraform output shows that a database will be created once this configuration file is applied to the infrastructure. The artifacts attached to an MR provide information that can help stakeholders review the proposed changes. 
The Terraform output in the MR fosters collaboration between stakeholders, and leads to infrastructure that is more consistent, resilient, reliable, and stable, and helps prevent unscheduled outages.\n\nIn the image below, we see how reviewers can collaborate in GitLab. The screenshow shows that the original requester, Sasha, notices that a database storage of 5 GB is too small, so she makes an inline suggestion to increase the database storage capacity to 10 GB.\n\n![Inline suggestion to increase database storage to 10GB](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/3-tf-inline-suggestion-by-Sasha.png){: .shadow.small.center.wrap-text}\nInline suggestion to increase database storage to 10GB.\n{: .note.text-center}\n\nInline suggestions foster collaboration and help increase developer productivity suggested changes can be added with the click of a button.\n\nNext, Sidney invites DevOps engineer Devon to collaborate on the MR. Devon notices that the database version in the configuration file is not the latest one. He proceeds to make an inline suggestion proposing a more up-to-date version for Sidney to review:\n\n![Inline suggestion to update database version](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/4-tf-inline-suggestion-by-Devon.png){: .shadow.small.center.wrap-text}\nInline suggestion to update database version.\n{: .note.text-center}\n\nSidney can monitor the discussion between code reviewers on the MR by tracking the number of unresolved threads. 
So far, there are four unresolved threads:\n\n![Number of unresolved threads displayed at the top of the MR](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/5-tf-unresolved-threads-for-Sidney.png){: .shadow.small.center.wrap-text}\nNumber of unresolved threads displayed at the top of the MR.\n{: .note.text-center}\n\nSidney starts resolving the threads by following the convenient thread navigation provided by GitLab, which makes it easy for her to process each of the proposed review items. Sidney just needs to click \"Apply suggestion\" to accept an input from a reviewer:\n\n![Applying a suggestion with a single button click](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/6-tf-apply-inline-suggestion-by-Sidney.png){: .shadow.small.center.wrap-text}\nApplying a suggestion with one click.\n{: .note.text-center}\n\nDevon suggested replacing the embedded database admin username and password with a parameter in the inline review, so Sidney replaces the embedded values with variables. 
The variable values will be managed by masked variables within GitLab:\n\n![Parameterizing variables in Terraform configuration file](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/7-tf-parameterizing-vars-by-Sidney.png){: .shadow.small.center.wrap-text}\nParameterizing variables in Terraform configuration file.\n{: .note.text-center}\n\nOnce the threads are resolved and the stakeholders involved in thh MR finish collaborating, it's time to merge.\n\nLearn more about how GitLab fosters collaboration using the principles of GitOps in the video below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/onFpj_wvbLM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\nIn this next example, Sasha is the one merging the MR:\n\n![Merge Request with infrastructure updates being merged](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/8-tf-MR-merged.png){: .shadow.small.center.wrap-text}\nMR with infrastructure updates being merged.\n{: .note.text-center}\n\nMerging automatically launches a pipeline that will apply the changes to the infrastructure:\n\n![GitOps pipeline completed execution](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/9-tf-pipeline-complete.png){: .shadow.small.center.wrap-text}\nGitOps pipeline completed execution.\n{: .note.text-center}\n\n#### CI/CD with non-K8s infrastructure\n\nThe CI/CD pipeline in the previous example works by validating the infrastructure configuration files. Then the pipeline validates the proposed updates against the current state of the infrastructure. 
Finally, it applies the updates to the production infrastructure.\n\nRunning this GitOps flow results in a brand new MySQL database on AWS RDS:\n\n![A new MySQL database has been created via a GitOps flow](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/10-db-ready.png){: .shadow.small.center.wrap-text}\nA new MySQL database has been created via a GitOps flow.\n{: .note.text-center}\n\nBy checking the details of the new MySQL database you can corroborate that the database storage is 10 GB and that the database version is the most current\"\n\n![Resulting MySQL database configuration from the collaboration of stakeholders](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/11-db-version-and-10g-storage.png){: .shadow.small.center.wrap-text}\nThe MySQL database configuration built by team member collaboration.\n{: .note.text-center}\n\nIn the next section, we look at how a similar GitOps flow can be applied to a Kubernetes cluster.\n\n### GitLab and Terraform for K8s infrastructure\n\nWe skip past all the collaboration steps to focus on a change to the EKS cluster Terraform configuration file. In the picture below, a user is changing the minimum size of the autoscaling group of the EKS cluster from one to two:\n\n![Raising autoscaling group minimum to 2](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/12-worker-nodes-to-two.png){: .shadow.small.center.wrap-text}\nIncreasing autoscaling group minimum to two.\n{: .note.text-center}\n\nWhen the stakeholder commits the change in the MR, a CI/CD pipeline validates the configuration, performs a plan against production, and applys the updates to the production infrastructure. 
After the pipeline finishes, the user can log into the Amazon EC2 console to verify that the EKS cluster now has a minimum of two nodes in its autoscaling group:\n\n![GitOps flow modified the number of worker nodes in K8s cluster](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/13-two-worker-nodes-on-AWS.png){: .shadow.small.center.wrap-text}\nGitOps flow modified the number of worker nodes in K8s cluster.\n{: .note.text-center}\n\nSee this scenario in action by watching the [GitOps presentation](/topics/gitops/gitops-multicloud-deployments-gitlab/) on our GitOps topics page.\n\n## GitOps flows for non-K8s (like ECS, EC2)\n\nGitLab also provides Auto Deploy capabilities to streamline application deployment to ECS and EC2, so you can shape infrastructure as desired.\n\n### Deploying to Amazon ECS\n\nAfter creating your ECS cluster, GitLab can deliver your application and its infrastructure to the cluster by including the ECS Deployment template in your `gitlab-ci.yml`, using CI/CD.\n\n```\ninclude:\nTemplate: AWS/Deploy-ECS.gitlab-ci.yml\n```\n\nNext, create the `ECS Task Definition` file in your project that specifies your app's infrastructure requirements, along with other details.\n\n![ECS Task Definition file snippet](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/14-ECS-taskdef-file.png){: .shadow.small.center.wrap-text}\nECS Task Definition file snippet.\n{: .note.text-center}\n\nFinally, define the project variable that will drive the template:\n\n![Project variables required to auto-deploy to ECS](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/15-ECS-related-vars.png){: .shadow.small.center.wrap-text}\nProject variables required to auto-deploy to ECS.\n{: .note.text-center}\n\nThe ECS deployment template does the rest, including support review pipelines.\n\n![Review pipeline in GitOps 
flow](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/16-ECS-review-pipeline.png){: .shadow.small.center.wrap-text}\nReview pipeline in GitOps flow.\n{: .note.text-center}\n\nIn the review pipeline above, stakeholders can review the proposed changes before sending to production. The two screenshots below show different aspects of the proposed changes in the log output of the `review_fargate` job:\n\n![Configuring load balancers in ECS](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/17-review-fargate-log-begin.png){: .shadow.small.center.wrap-text}\nConfigure load balancers in ECS.\n{: .note.text-center}\n\nSee the configuration for infrastructure components like load balancers in the image above. The image below shows infrastructure components like subnets, security groups, and the assignment of a public IP address:\n\n![Configuring subnets, security groups in ECS](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/18-review-fargate-log-middle.png){: .shadow.small.center.wrap-text}\nConfiguring subnets and security groups in ECS.\n{: .note.text-center}\n\nOnce all stakeholders are done collaborating on a proposed change to the production infrastructure, the updates are applied using a CI/CD pipeline. Below is an example of this type of pipeline:\n\n![Applying infrastructure updates to production](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/19-ECS-prod-pipeline.png){: .shadow.small.center.wrap-text}\nApplying infrastructure updates to production.\n{: .note.text-center}\n\nRead our documentation to learn more about [how GitLab users can Auto Deploy to ECS](https://docs.gitlab.com/ee/ci/cloud_deployment/#deploy-your-application-to-the-aws-elastic-container-service-ecs).\n\n### Deploying to Amazon EC2\n\nGitLab also provides a built-in template to provision infrastructure and deploy your applications to EC2 as part of Auto DevOps. 
The template:\n\n- Provisions infrastructure using AWS CloudFormation\n- Pushes application to S3\n- Deploys your application from S3 to EC2\n\nEach of these steps requires a JSON configuration file. Below is an example of a portion of a CloudFormation Stack JSON file used to create your infrastructure:\n\n![CloudFormation stack JSON snippet](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/20-EC2-portion-stack-file.png){: .shadow.small.center.wrap-text}\nCloudFormation stack JSON snippet.\n{: .note.text-center}\n\nThe JSON used by the Auto Deploy template to push your application to S3 would look similar to this:\n\n![JSON to push application to S3](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/21-EC2-push-file.png){: .shadow.small.center.wrap-text}\nJSON to push application to S3.\n{: .note.text-center}\n\nAnd the file used for the actual deployment of your application from S3 to EC2 would be like the following:\n\n![JSON to deploy application to EC2](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/22-EC2-deploy-file.png){: .shadow.small.center.wrap-text}\nJSON to deploy application to EC2.\n{: .note.text-center}\n\nAfter creating these files, you need to create the following variables in your project - displayed here with some sample values:\n\n```\nvariables:\n  CI_AWS_CF_CREATE_STACK_FILE: 'aws/cf_create_stack.json'\n  CI_AWS_S3_PUSH_FILE: 'aws/s3_push.json'\n  CI_AWS_EC2_DEPLOYMENT_FILE: 'aws/create_deployment.json'\n  CI_AWS_CF_STACK_NAME: 'YourStackName'\n```\n\nThe last step is to include the template in your `.gitlab-ci.yml` file:\n\n```\ninclude:\n  - template: AWS/CF-Provision-and-Deploy-EC2.gitlab-ci.yml\n```\n\nMore details on [how GitLab uses Auto Deploy to EC2 are available in the documentation](https://docs.gitlab.com/ee/ci/cloud_deployment/#provision-and-deploy-to-your-aws-elastic-compute-cloud-ec2).\n\n## Agent or agentless: GitLab has your GitOps flows covered\n\nWhether your 
situation calls for an agent-based/pull-approach to doing GitOps, or for an agentless/push-approach, GitLab has your back. GitLab offers the flexibility to choose the approach to GitOps that best fits your specific projects or applications. GitLab also supports many types of infrastructures – from physical components and virtual machines, Kubernetes and containers, as well as infrastructure-as-code tools like Terraform, Ansible, and AWS Cloud Formation.\n",[534,9,1731],{"slug":4853,"featured":6,"template":686},"how-to-agentless-gitops-aws","content:en-us:blog:how-to-agentless-gitops-aws.yml","How To Agentless Gitops Aws","en-us/blog/how-to-agentless-gitops-aws.yml","en-us/blog/how-to-agentless-gitops-aws",{"_path":4859,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4860,"content":4866,"config":4872,"_id":4874,"_type":14,"title":4875,"_source":16,"_file":4876,"_stem":4877,"_extension":19},"/en-us/blog/how-to-agentless-gitops-vars",{"title":4861,"description":4862,"ogTitle":4861,"ogDescription":4862,"noIndex":6,"ogImage":4863,"ogUrl":4864,"ogSiteName":670,"ogType":671,"canonicalUrls":4864,"schema":4865},"Using push-based GitOps with GitLab scripts and variables","Learn how GitLab supports agentless approach for GitOps with scripting and variables.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682051/Blog/Hero%20Images/agentless-gitops-vars-cover-880x587.jpg","https://about.gitlab.com/blog/how-to-agentless-gitops-vars","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use a push-based approach for GitOps with GitLab scripting and variables\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-07-23\",\n      }",{"title":4867,"description":4862,"authors":4868,"heroImage":4863,"date":4869,"body":4870,"category":791,"tags":4871},"How to use a push-based approach for GitOps with GitLab scripting and 
variables",[1727],"2021-07-23","\n\nIn [part one](/blog/how-to-use-agent-based-gitops/) of our GitOps series, we described how to use a pull-based (or agent-based) approach. In this second blog post, we'll dig deep into how to use a push-based approach. The agentless approach may be preferable for situations with non-Kubernetes infrastructure components or when you don't want to install, run, and maintain agents in each infrastructure component for [GitOps](/topics/gitops/). In this post, we will discuss how the scripting capabilities of GitLab can be used in GitOps workflows, and how to use predefined GitLab variables to shape infrastructure components.\n\n## About a push-based or agentless approach\n\nWith the agentless approach, infrastructure expressed and managed as code on GitLab, and updates and drift detection are automated and handled by GitLab without having to install any agents on infrastructure components.\n\n## How to use scripting in your pipelines to shape infrastructure\n\nGitLab allows automation using scripting. 
Whether you're using Docker, Helm, Ansible, or even direct SSH commands, you can use the scripting capabilities of GitLab to create, shape, and modify infrastructure.\n\nIn the example below, the pipeline determines the shape of the infrastructure the application runs on by specifying a Docker image as well as running Docker commands to build and push an application to the GitLab built-in container registry.\n\n![Using Docker in your pipeline to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/0-docker-use-in-pipeline.png){: .shadow.small.center.wrap-text}\nHow to use Docker in your pipeline to shape infrastructure.\n{: .note.text-center}\n\nThe infrastructure is shaped again at a later stage in the pipeline, but this time by using kubectl and Helm commands:\n\n![Using kubectl in your pipeline to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/1-helm-use-in-pipeline.png){: .shadow.medium.center.wrap-text}\nHow to use kubectl in your pipeline to shape infrastructure.\n{: .note.text-center}\n\nDepending on the type of infrastructure, other technologies can be used to shape the infrastructure. 
In the next example, Ansible is used to run a playbook that sets up the infrastructure for an entire lab environment:\n\n![Using Ansible in your pipeline to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/2-ansible-use-in-pipeline.png){: .shadow.medium.center.wrap-text}\nHow to use Ansible in your pipeline to shape infrastructure.\n{: .note.text-center}\n\nThe scripting capabilities of GitLab pipelines combined with GitLab's CI/CD capabilities allow users to create GitOps flows to manage Infrastructure as Code (IaC), which delivers more resilient infrastructure and less risk of unscheduled downtime.\n\n## How to use Auto DevOps to modify infrastructure using variables\n\nGitLab also allows users to shape infrastructure by using project or group variables. The number of production pods in a Kubernetes cluster is updated to four in the example below:\n\n![Using variables to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/3-ado-modify-infra-via-vars.png){: .shadow.medium.center.wrap-text}\nHow to use variables to shape infrastructure.\n{: .note.text-center}\n\nThe number of production pods is changed to four on the next execution of the pipeline:\n\n![Production pods increased via a variable update](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/4-ado-modified-infra-via-vars.png){: .shadow.medium.center.wrap-text}\nProduction pods changed using a variable update.\n{: .note.text-center}\n\nThere are many GitLab [build and deployment variables](https://docs.gitlab.com/ee/topics/autodevops/customize.html#build-and-deployment) that can modify infrastructure. 
[PostgreSQL](https://www.postgresql.org/) is provisioned as a component in infrastructure by default in GitLab to support applications that require a database and also provides [these variables](https://docs.gitlab.com/ee/topics/autodevops/customize.html#database) to customize it.\n\n## How GitLab capabilities help agentless infrastructure\n\nThe scripting capabilities of GitLab are a convenient way to shape infrastructure components in GitOps workflows using a push-based approach. This method allows for the easy integration of IaC tools in your GitOps pipelines. If you are doing IaC and GitOps for non-Kubernetes infrastructure components, this is the best approach. GitLab also provides out-of-the-box variables, so users can impact selected infrastructure components. In the final part of this GitOps series, we will discuss an agentless approach using our integration to Terraform as well as examples of GitOps flows for AWS ECS and EC2.\n\nCover image by [Rod Long](https://unsplash.com/@rodlong?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/machu-picchu?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n\n## Read more on GitOps with GitLab: \n\n- [GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform](/blog/gitops-with-gitlab-infrastructure-provisioning/)\n\n- [Here's how to do GitOps with GitLab](/blog/gitops-with-gitlab/)\n\n- [GitOps viewed as part of the Ops evolution](/blog/gitops-as-the-evolution-of-operations/)\n\n- [GitOps with GitLab: Connect with a Kubernetes cluster](/blog/gitops-with-gitlab-connecting-the-cluster/)\n\n\n\n",[534,9,1731],{"slug":4873,"featured":6,"template":686},"how-to-agentless-gitops-vars","content:en-us:blog:how-to-agentless-gitops-vars.yml","How To Agentless Gitops 
Vars","en-us/blog/how-to-agentless-gitops-vars.yml","en-us/blog/how-to-agentless-gitops-vars",{"_path":4879,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4880,"content":4885,"config":4890,"_id":4892,"_type":14,"title":4893,"_source":16,"_file":4894,"_stem":4895,"_extension":19},"/en-us/blog/how-to-ask-smarter-devops-questions",{"title":4881,"description":4882,"ogTitle":4881,"ogDescription":4882,"noIndex":6,"ogImage":1449,"ogUrl":4883,"ogSiteName":670,"ogType":671,"canonicalUrls":4883,"schema":4884},"How to ask smarter DevOps questions","Take your DevOps practice to the next level by asking 10 critical questions.","https://about.gitlab.com/blog/how-to-ask-smarter-devops-questions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to ask smarter DevOps questions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-06-22\",\n      }",{"title":4881,"description":4882,"authors":4886,"heroImage":1449,"date":4887,"body":4888,"category":769,"tags":4889},[851],"2022-06-22","\n\nGitLab has [surveyed DevOps practitioners](/developer-survey/) for more than five years now. In that time, we have come to know what questions to ask to understand how well teams are doing with DevOps. In sharing these 10 questions, we aim to help you assess your own team’s capabilities and achieve smarter, faster DevOps.\n\n### How fast is your team releasing code today vs. one year ago?\n\nTracking release speed is like taking the temperature of your DevOps team. You’d like to think everything is going well, but you might be surprised. Occasionally DevOps teams report to us they are actually releasing code more slowly than in the past. \n\n### What stage(s) in the process are causing the most release delays?\n\nThis question will shine a spotlight on the areas in your DevOps practice that simply don’t work. 
Spoiler alert: The answer [will certainly be testing](/blog/the-software-testing-life-cycle-in-2021-a-more-upbeat-outlook/), though other things, from planning to code development and code review, might pop up, too.\n\n### How automated is your DevOps process?\n\nAsk this, but don’t just focus on testing, tempting as that might be. Also think about what else in the software development lifecycle would [benefit from automation](/blog/cd-automated-integrated/). Consider what getting that time back would afford you. Could you assign your developers and ops pros to other business-critical projects?\n\n### What’s been added to your DevOps tech stack over the last year?\n\nIt’s good to look back and take inventory of the technology you have in play. This is also data that can help inform what your next steps might be, such as adopting [GitOps](/topics/gitops/), [observability](/blog/observability-vs-monitoring-in-devops/), or [AI](https://www.youtube.com/watch?v=C08QVI99JLo).\n\n### How are your DevOps roles changing?\n\nIf your team is like others we’ve heard from, (big) changes are happening. Devs are picking up tasks that have traditionally been owned by ops, ops is becoming anything from a DevOps coach to a [platform engineer](/topics/devops/what-is-a-devops-platform-engineer/) or a cloud expert, and security is likely now embedded in development teams.\n\n### How does security integrate with DevOps in your organization?\n\nThe most successful DevOps teams have figured out how to [bridge the dev and sec divide](/blog/developer-security-divide/). 
Whether your team has a [security champion](/blog/why-security-champions/) or actually embeds sec pros on the dev team, this is a critical piece in the process to release safer software faster.\n\n### What advanced technologies are you using (or considering) in your DevOps practice?\n\n“Bots” can test code, [AI can review code](/blog/ai-in-software-development/), and a [low code/no code tool](/blog/low-code-no-code/) will make [citizen developers](https://www.gartner.com/en/information-technology/glossary/citizen-developer) out of anyone in the organization. Now is definitely the time to make sure your DevOps team is future-proofing the tech stack.\n\n### Do you have a plan for governance and compliance of your software supply chain?\n\nTo keep the [software supply chain secure](/blog/elite-team-strategies-to-secure-software-supply-chains/), DevOps teams need visibility into and control over the entire development lifecycle. Can you easily deal with audits or attestations of compliance? Mature governance and compliance processes are essential in all industries today, not just those that are highly regulated.\n\n### What advanced practices are you using (or considering) in your DevOps environment?\n\nWhether it’s [Infrastructure as Code (IaC)](/topics/gitops/infrastructure-as-code/), GitOps, or [MLOps](/blog/introducing-modelops-to-solve-data-science-challenges/), cutting-edge practices can jumpstart your releases and bring new and interesting opportunities to DevOps teams.\n\n### Do you regularly assess DevOps careers and roles on your team?\n\nHappy team members [really are more productive](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/), so consider this a PSA to keep career growth conversations a priority. 
\n\nIn considering these 10 questions, your team will gain a fuller picture of your DevOps capabilities and how to address the technology and talent gaps you have identified.\n\n",[9,916,749],{"slug":4891,"featured":6,"template":686},"how-to-ask-smarter-devops-questions","content:en-us:blog:how-to-ask-smarter-devops-questions.yml","How To Ask Smarter Devops Questions","en-us/blog/how-to-ask-smarter-devops-questions.yml","en-us/blog/how-to-ask-smarter-devops-questions",{"_path":4897,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4898,"content":4904,"config":4909,"_id":4911,"_type":14,"title":4912,"_source":16,"_file":4913,"_stem":4914,"_extension":19},"/en-us/blog/how-to-automate-localization-for-flutter-apps",{"title":4899,"description":4900,"ogTitle":4899,"ogDescription":4900,"noIndex":6,"ogImage":4901,"ogUrl":4902,"ogSiteName":670,"ogType":671,"canonicalUrls":4902,"schema":4903},"How to automate localization for Flutter apps","Follow this tutorial to learn how to simplify the localization process on GitLab with Localizely.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679465/Blog/Hero%20Images/flutterbanner.png","https://about.gitlab.com/blog/how-to-automate-localization-for-flutter-apps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate localization for Flutter apps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-12-10\",\n      }",{"title":4899,"description":4900,"authors":4905,"heroImage":4901,"date":4906,"body":4907,"category":791,"tags":4908},[745],"2021-12-10","\n\nLocalization is an indispensable part of today's software. Almost all successful companies strive to adapt their products to different languages, regions, and cultures. Customer satisfaction is crucial for business. 
However, that often comes at a cost in terms of the higher complexity in software development and maintenance. In addition to regular activities, you must also take care of translation, its synchronization with development processes, and the like.\n\nThe question is: Can we somehow simplify the localization process and make it more agile? The answer is “yes.\" Below, you can see how GitLab and the [Localizely](https://localizely.com/) platform can help. For that purpose, we will use a simple Flutter project. However, the same approach can be applied to other programming languages and frameworks.\n\n## A few words about the Flutter project\n\nFlutter is an open-source framework developed by Google for building multi-platform apps from a single codebase. It has become quite popular lately, as it solves some things much better than some other solutions (hot-reload, performance, etc.). Since the point of this post is the automation of localization, we will not deal with Flutter too much. But we will certainly highlight some important things regarding localization in Flutter projects.\n\nWhatever approach you used to create and localize your Flutter project, its structure would probably be similar to the one below. \n\n![Flutter project structure](https://about.gitlab.com/images/blogimages/fluttergraphic.png){: .shadow.small.left}\n\nAbove, you can see the l10n folder with the two [ARB](https://localizely.com/flutter-arb/) files. Each ARB file contains translations for one language in the Flutter project (i.e. intl_de.arb for German and intl_en.arb for English). Whenever we want to add, modify, or remove a translation, we need to update those files. In other words, those files are the basis of localization in Flutter projects. 
They separate programming from translation but require synchronization with your code so that each message has a corresponding translation.\n\n## The usual way of localization\n\nThere is no exact rule or process that describes the usual way of localization. However, we could roughly describe it as the routine of a few steps:\n\n1. The developer updates code and the main ARB file.\n2. The developer sends ARB files to the project manager.\n3. The project manager sends ARB files to translators (e.g. email, upload to localization platform, etc.).\n4. The translators work on translations.\n5. The project manager forwards translated ARB files to the developer.\n6. The developer updates the Flutter project with new translations.\n\nIn this simplified case of localization, we can already notice some tasks that drain a lot of time and can be a bottleneck. Those are steps 2, 3, and 5. Moreover, these six steps can be frequent (e.g. update of the UI, new feature, etc.), which is not exactly the optimal solution. And that is even truer for medium and large teams. Just imagine how much time is wasted on file sharing when you have to coordinate in a team of 10+ people. Not to mention the problem with outdated ARB files.\n\n## Automated localization\n\nSince you've seen some flaws in the usual way of localization, let's see how we can optimize that.\n\n1. The developer updates code, the main ARB file, and pushes changes to GitLab.\n2. GitLab informs Localizely via webhook regarding new changes. \n3. Localizely fetches ARB files from GitLab and lets translators work on translations.\n4. The project manager pushes updated ARB files to GitLab via [MR](https://docs.gitlab.com/ee/user/project/merge_requests/).\n5. The developer updates the Flutter project with new translations (merge MR).\n\nThis way of working enables everyone to do their job more efficiently. 
Developers can be focused on the development of the product, translators on translations, managers on management, and similar. It should also be noted that with this type of workflow, you can easily accelerate the development and delivery of new features, which is in everyone's interest.\n\nTo make this workflow possible, you need to adjust a few things. In the following, you can see the necessary settings.\n\n1. Add a [localizely.yml](https://localizely.com/configuration-file/) config file to the root of your Flutter project. \n2. Set up [GitLab integration](https://localizely.com/gitlab-integration/) on the Localizely platform. \n3. Add a webhook to the GitLab repository.\n\nAnd that’s all. You have automated localization on your Flutter project. Whenever the developer pushes the changes to GitLab, the translators will see new string keys on the Localizely. Once the translation is done, a single click on the button creates a new MR with the latest translations on GitLab. There is no need for a mediator, waiting, or sending ARB files for every little thing. Now you can have more time for other things as this tedious work is automated.\n\n## Final thoughts\n\nIn this post, you have seen the most common steps of localization in Flutter projects and how to automate some of them. Knowing how important efficiency is today, we should strive to automate repetitive tasks as much as possible. 
As someone once said, “Lost time is never found again”.\n\n",[9,1339,978],{"slug":4910,"featured":6,"template":686},"how-to-automate-localization-for-flutter-apps","content:en-us:blog:how-to-automate-localization-for-flutter-apps.yml","How To Automate Localization For Flutter Apps","en-us/blog/how-to-automate-localization-for-flutter-apps.yml","en-us/blog/how-to-automate-localization-for-flutter-apps",{"_path":4916,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4917,"content":4922,"config":4927,"_id":4929,"_type":14,"title":4930,"_source":16,"_file":4931,"_stem":4932,"_extension":19},"/en-us/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab",{"title":4918,"description":4919,"ogTitle":4918,"ogDescription":4919,"noIndex":6,"ogImage":1331,"ogUrl":4920,"ogSiteName":670,"ogType":671,"canonicalUrls":4920,"schema":4921},"How to automate software delivery using Quarkus and GitLab","Here's a step-by-step guide to automated software delivery using Supersonic Subatomic Java (Quarkus) and GitLab.","https://about.gitlab.com/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate software delivery using Quarkus and GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2022-06-09\",\n      }",{"title":4918,"description":4919,"authors":4923,"heroImage":1331,"date":4924,"body":4925,"category":769,"tags":4926},[1727],"2022-06-09","\n\nIn this day and age, organizations need to deliver innovative solutions faster than ever to their customers to stay competitive. 
This is why solutions that speed up software development and delivery, such as Quarkus and GitLab, are being adopted by teams across the world.\n\n[Quarkus](https://quarkus.io/), also known as the Supersonic Subatomic Java, is an open source Kubernetes-native Java stack tailored for OpenJDK HotSpot and GraalVM, crafted from respected Java libraries and standards. Quarkus has been steadily growing in popularity and use because of the benefits that it delivers: cost savings, faster time to market/value, and reliability. Quarkus offers two modes: Java and native. Its Java mode builds your application using the JDK and its native mode compiles your Java code into a native executable.\n\nGitLab, the One DevOps Platform, includes capabilities for all DevOps stages, from planning to production, all with a single model and user interface to help you ship secure code faster to any cloud and drive business results. Besides DevOps support, GitLab also offers GitOps support.\n\nThe combination of Quarkus and GitLab can empower your developers and operations teams to collaborate better, spend more time innovating to deliver business value and differentiating capabilities to end users.\n\nIn this article, we show how to automate the software delivery of a generated Quarkus application in Java mode using GitLab Auto DevOps. Below we list the steps how to accomplish this.\n\n## Prerequisite\n\nThe prerequisite for the subsequent instructions is to have a K8s cluster up and running and associated to a group in your GitLab account. 
For an example on how to do this, please watch this [video](https://youtu.be/QRR3WuwnxXE).\n\n## Generate your Quarkus project using the generator and upload to GitLab\n\n- From a browser window, point to the Quarkus generator site, https://code.quarkus.io, and click on the button **Generate your application**.\n\n![Generate Quarkus app](https://about.gitlab.com/images/blogimages/quarkusone.png){:small.center.}\n\nGenerate a sample Quarkus application using the generator\n{: .note.text-center}\n\n- On the popup window, click on the button **DOWNLOAD THE ZIP**, to download a sample Quarkus application in a ZIP file to your local machine. The downloaded file is named `code-with-quarkus.zip`.\n\n- Unzip the file on your local machine in a directory of your choice. This will create a new directory called `code-with-quarkus` with all the files for the sample Quarkus application.\n\n- From a browser window, open https://gitlab.com, and log in using your GitLab credentials.\n\n- Head over to the GitLab group to which you associated your K8s cluster and create a blank project named `code-with-quarkus`.\n\n![Create project code-with-quarkus](https://about.gitlab.com/images/blogimages/quarkustwo.png){: .shadow.small.center.wrap-text}\nCreate project code-with-quarkus\n{: .note.text-center}\n\n- From a Terminal window on your local machine, change directory to the newly unzipped directory `code-with-quarkus` and execute the command `rm .dockerignore` to delete the `.dockerignore` file that came with the sample Quarkus application. After removing this file, execute the following commands to populate your newly created Git project `code-with-quarkus` with the contents of this directory:\n\n**NOTE:** Depending on your version of git installed on your local machine, the commands below may vary. 
Keep in mind that the goal of the steps below is to upload the project on your local machine to your newly created GitLab project.\n\n```\ngit init\ngit remote add origin https://gitlab.com/[REPLACE WITH PATH TO YOUR GROUP]/code-with-quarkus.git\ngit add .\ngit commit -m \"Initial commit\"\ngit push --set-upstream origin master\n```\n\nAt this point, you should have your sample Quarkus application in your GitLab project `code-with-quarkus`.\n\n## Modify the generated Dockerfile.jvm file and indicate its location\n\nSince the location of the Dockerfile is not at the root level of the project, we need to create a project variable DOCKERFILE_PATH and set it to `src/main/docker/Dockerfile.jvm` to indicate to the Auto Build job where to find the Dockerfile to build the container image.\n\n- From your `code-with-quarkus` GitLab project window, select **Settings > CI/CD** from the left vertical navigation menu.\n\n- Scroll to the **Variables** section on the screen and click on the **Expand** button on the right hand side of the section.\n\n- Click on the **Add Variable** button and enter the following values for the fields in the popup:\n\n```\nKey = DOCKERFILE_PATH\nValue = src/main/docker/Dockerfile.jvm\nType = Variable\nEnvironment scope = All (default)\nProtect variable Flag = ensure this flag is unchecked\nMask variable Flag = ensure this flag is unchecked\n```\n\nThe variable definition should look as follows:\n\n![Add var dockerfilepath](https://about.gitlab.com/images/blogimages/quarkusthree.png){: .shadow.small.center.wrap-text}\nAdd DOCKERFILE_PATH variable to the your code-with-quarkus project\n{: .note.text-center}\n\n- Click on the **Add variable** button to complete adding this variable to your project\n\nIn order for Auto Build to work, we need to make some minor modifications to the generated Dockerfile.jvm in the sample Quarkus application.\n\n- From your `code-with-quarkus` GitLab project window, navigate to the directory `src/main/docker` and click on 
the file `Dockerfile.jvm`. Click on the **Edit** button to start making changes to this file.\n\n- At the top of the file, you will see about 77 lines of comments. Replace all the lines following the comments with the following code segment:\n\n```\n####\nFROM openjdk:11 as builder\nRUN mkdir /build\nADD . /build/\n\nWORKDIR /build\nRUN ./mvnw package\n\nFROM registry.access.redhat.com/ubi8/openjdk-11:1.11\n\nENV LANG='en_US.UTF-8' LANGUAGE='en_US:en'\n\n# We make four distinct layers so if there are application changes the library layers can be re-used\nCOPY --from=builder --chown=185 /build/target/quarkus-app/lib/ /deployments/lib/\nCOPY --from=builder --chown=185 /build/target/quarkus-app/*.jar /deployments/\nCOPY --from=builder --chown=185 /build/target/quarkus-app/app/ /deployments/app/\nCOPY --from=builder --chown=185 /build/target/quarkus-app/quarkus/ /deployments/quarkus/\n\nEXPOSE 8080\nUSER 185\nENV AB_JOLOKIA_OFF=\"\"\nENV JAVA_OPTS=\"-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager\"\nENV JAVA_APP_JAR=\"/deployments/quarkus-run.jar\"\n```\n\nThe lines above add a build stage called `builder` to do the Java build using the openjdk:11 image and add a `build` working directory to the process. The rest of the lines are effectively the same as the original except that we have updated the paths of the `COPY` commands to find the appropriate files under the `build` working directory.\n\n- Click on the **Commit changes** button at the bottom of the **New file** window to create the new file.\n\n## Update the application port number\n\nThe Auto Deploy job of Auto DevOps defaults to port 5000 for applications but the sample Quarkus application uses port 8080. So, we need to override this value in the helm chart for the Auto Deploy job. 
This is how you do it:\n\n- From your `code-with-quarkus` GitLab project window, click on **New File** from the pop-down menu next to project root name directory as shown below:\n\n![Select new file](https://about.gitlab.com/images/blogimages/quarkusfour.png){: .shadow.small.center.wrap-text}\nSelect New file from your code-with-quarkus project top-level directory\n{: .note.text-center}\n\n- On the **New file** window, enter `.gitlab/auto-deploy-values.yaml` for the name of the new file and paste the following two lines as the content of the file:\n\n```\nservice:\n  internalPort: 8080\n```\n\nYour window should look as follows:\n\n![Update application port number for Auto Deploy](https://about.gitlab.com/images/blogimages/quarkusfive.png){: .shadow.small.center.wrap-text}\nUpdate the application port number in the helm chart for Auto Deploy\n{: .note.text-center}\n\n- Click on the **Commit changes** button at the bottom of the **New file** window to create the new file.\n\n## Update the version of the JDK\n\nThe sample Quarkus application includes a unit test that is automatically run by the Auto Test job, which uses a Java version not compatible with Quarkus resulting in “java.lang.UnsupportedClassVersionError” exceptions. To solve this, we need to adjust the Java runtime version to 11 since this is the lowest version of the JRE supported by Quarkus. Let’s do this:\n\n- From your `code-with-quarkus` GitLab project window, click on **New File** from the pop-down menu next to project root name directory and name the new file `system.properties`. 
As its contents, paste the following line into it:\n\n```\njava.runtime.version=11\n```\n\n- Click on the **Commit changes** button at the bottom of the **New file** window to create the new file.\n\n## Enable Auto DevOps\n\nLastly, we need to enable Auto DevOps for your `code-with-quarkus` GitLab project.\n\n- From your `code-with-quarkus` GitLab project window, select **Settings > CI/CD** from the left vertical navigation menu.\n\n- Scroll to the **Auto DevOps** section on the screen and click on the **Expand** button on the right hand side of the section.\n\n- In the section, check the **Default to Auto DevOps pipeline** checkbox. Then, for Deployment strategy, select on the radio button **Automatic deployment to staging, manual deployment to production**. Finally, click on the **Save changes** button. Here’s an example screenshot:\n\n![Enable Auto DevOps](https://about.gitlab.com/images/blogimages/quarkussix.png){: .shadow.small.center.wrap-text}\nEnable Auto DevOps for your sample Quarkus project\n{: .note.text-center}\n\nThis will launch an Auto DevOps pipeline that will build, test and deploy your application first to the staging environment and then give you the option to manually deploy to 100% of the production environment. The completed Auto DevOps pipeline should look like this:\n\n![Completed pipeline](https://about.gitlab.com/images/blogimages/completed-pipe.png){: .shadow}\nCompleted Auto DevOps pipeline for a sample Quarkus application in Java mode\n{: .note.text-center}\n\n## Conclusion\n\nThe combination of Quarkus and GitLab can empower your developers and operations teams to collaborate better, spend more time innovating to deliver business value and differentiating capabilities to end users.\n\nIn this article, we showed how to automate the software delivery of a generated Quarkus application in Java mode using GitLab Auto DevOps. 
Here is [a working sample project](https://gitlab.com/tech-marketing/sandbox/hn/code-with-quarkus) of this Quarkus application, whose delivery has been automated by GitLab Auto DevOps.\n\n\n\n\n\n\n\n\n\n\n",[9,109,267],{"slug":4928,"featured":6,"template":686},"how-to-automate-software-delivery-using-quarkus-and-gitlab","content:en-us:blog:how-to-automate-software-delivery-using-quarkus-and-gitlab.yml","How To Automate Software Delivery Using Quarkus And Gitlab","en-us/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab.yml","en-us/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab",{"_path":4934,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4935,"content":4941,"config":4947,"_id":4949,"_type":14,"title":4950,"_source":16,"_file":4951,"_stem":4952,"_extension":19},"/en-us/blog/how-to-automate-testing-for-a-react-application-with-gitlab",{"title":4936,"description":4937,"ogTitle":4936,"ogDescription":4937,"noIndex":6,"ogImage":4938,"ogUrl":4939,"ogSiteName":670,"ogType":671,"canonicalUrls":4939,"schema":4940},"How to automate testing for a React application with GitLab","Learn how to add React automated tests to a GitLab CI pipeline with this tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666775/Blog/Hero%20Images/cover.jpg","https://about.gitlab.com/blog/how-to-automate-testing-for-a-react-application-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate testing for a React application with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeremy Wagner\"}],\n        \"datePublished\": \"2022-11-01\",\n      }",{"title":4936,"description":4937,"authors":4942,"heroImage":4938,"date":4944,"body":4945,"category":791,"tags":4946},[4943],"Jeremy Wagner","2022-11-01","\n\nReact is a popular JavaScript library for building user interfaces. 
In this tutorial, I'll show you \nhow to create a new React application, run unit tests as part of the CI process in GitLab, and output\nthe test results and code coverage into the pipeline.\n\n## Prerequisites\n\nFor this tutorial you will need the following:\n\n- [Node.js](https://nodejs.org/en/) >= 14.0.0 and npm >= 5.6 installed on your system\n- [Git](https://git-scm.com/) installed on your system\n- A [GitLab](https://gitlab.com/-/trial_registrations/new) account\n\n## Getting started\n\nTo get started, [create a new project in GitLab](https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-project).\n\nWhen you are on the \"Create new project\" screen, select \"Create blank project.\" Fill out the project information \nwith your project name and details. After you create the project, you will be taken to the project with an empty repository.\n\nNext, we will clone the repository to your local machine. Copy the SSH or HTTPS URL from the \"Clone\" button and run the following\ncommand in the terminal for your working directory:\n\n```\ngit clone \u003Cyour copied URL here>\n```\n\n## Create the React app\n\nYou will create a new React application by using [Create React App](https://reactjs.org/docs/create-a-new-react-app.html#create-react-app).\n\nFrom the terminal `cd` into your newly cloned project directory and run this command:\n\n```\nnpx create-react-app .\n```\n\nThe npx CLI tool will create a new React application inside of your current directory.\n\nTo run the application, run the following command in your terminal:\n\n```\nnpm run start\n```\n\nYou can view the application you created in your browser window at `http://localhost:3000`.\n\n![Create React App home page](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/create-react-app.png){: .shadow}\n\nStop your application by pressing `CTRL` + `c` in your terminal. 
\n\nPush your new application to GitLab by running the following commands:\n\n```\ngit add -A\ngit commit -m \"Initial creation of React application\"\ngit push\n```\n\n## Testing your application\n\nBy default, Create React App uses [jest](https://jestjs.io/) as the test runner and one unit test to run.\n\n```javascript\nimport { render, screen } from '@testing-library/react';\nimport App from './App';\n\ntest('renders learn react link', () => {\n  render(\u003CApp />);\n  const linkElement = screen.getByText(/learn react/i);\n  expect(linkElement).toBeInTheDocument();\n});\n```\n\nInside your `package.json`, you should see that it comes with several scripts.\n\n```javascript\n\"scripts\": {\n    \"start\": \"react-scripts start\",\n    \"build\": \"react-scripts build\",\n    \"test\": \"react-scripts test\",\n    \"eject\": \"react-scripts eject\",\n  }\n```\n\nUse the test script to run the tests in your application by running the following command:\n\n```\nnpm run test\n```\n\nWhen prompted for \"Watch Usage,\" press `a` to run all of the tests. You will see that the existing test passes and it continues to watch for changes.\n\n![CLI passing tests](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/passing-test-cli.png){: .shadow}\n\nFor local development, watching for changes to run the tests is great; however, for our CI pipeline we would like to run the tests once, \ncreate a report so that we can see the results inside of our pipeline, and also determine the code coverage.\n\nExit the jest test watcher by pressing `CTRL` + `c` in your terminal. \n\n## Add unit test reporting and coverage\n\nTo view the unit test report, GitLab requires the runner to upload a JUnit report format XML file.\nWe will use `jest-junit` to generate this file. 
This is a unit test report for jest and will create an XML\nfile in the right format.\n\nTo install `jest-junit`, run the following command in your terminal:\n\n```\nnpm install --save-dev jest-junit\n```\n\nNow, add a new script to run the unit tests inside of your CI pipeline.\nAdd a `test:ci` script to your `package.json` that looks like this:\n\n```javascript\n\"scripts\": {\n    \"start\": \"react-scripts start\",\n    \"build\": \"react-scripts build\",\n    \"test\": \"react-scripts test\",\n    \"eject\": \"react-scripts eject\",\n    \"test:ci\": \"npm run test -- --testResultsProcessor=\\\"jest-junit\\\" --watchAll=false --ci --coverage\"\n  },\n```\n\n`--testResultsProcessor=\\\"jest-junit\\\"` tells jest to use the `jest-junit` library to create a unit test \nreport. `--watchAll=false` disables watch mode so that the tests will not rerun when something changes. `--ci` tells \nJest that it is running in a CI environment. `--coverage` tells Jest that test coverage information should be collected \nand reported in the output. For more information on these options, visit the [jest CLI options](https://jestjs.io/docs/cli) documentation.\n\n\nIf you run the new `test:ci` script, it will run the tests and create an XML file named `junit.xml` and print coverage statistics to the CLI.\n\n\n![CLI code coverage](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/coverage-cli.png){: .shadow}\n\n## Add unit tests to your CI pipeline\n\nIn the root of your application, create a file named `.gitlab-ci.yml`. \n\nDefine a test stage for your pipeline by adding the following code to your `.gitlab-ci.yml` file:\n\n```\nstages:\n  - test\n```\n\nNext, add a job named `unit-test` that will be responsible for running the unit tests in the test stage. 
Add the following code below the\ndefined stages:\n\n```\nunit-test:\n  image: node:latest\n  stage: test\n  before_script:\n    - npm install\n  script:\n    - npm run test:ci\n  coverage: /All files[^|]*\\|[^|]*\\s+([\\d\\.]+)/\n  artifacts:\n    paths:\n      - coverage/\n    when: always\n    reports:\n      junit:\n        - junit.xml\n```\n\nYour complete `.gitlab-ci.yml` file should look like this:\n\n```\nstages:\n  - test\n\nunit-test:\n  image: node:latest\n  stage: test\n  before_script:\n    - npm install\n  script:\n    - npm run test:ci\n  coverage: /All files[^|]*\\|[^|]*\\s+([\\d\\.]+)/\n  artifacts:\n    paths:\n      - coverage/\n    when: always\n    reports:\n      junit:\n        - junit.xml\n```\n\n\nBefore we push these changes to GitLab, add the following line to your `.gitignore`:\n\n```\njunit.xml\n```\n\nAdd your changes to GitLab by running these commands in your terminal:\n\n```\ngit add -A\ngit commit -m \"Adds .gitlab-ci.yml with unit testing\"\ngit push\n```\n\nWhen this command finishes, your code will be pushed to your project in GitLab and a pipeline will start \nautomatically running the `unit-test` job we defined earlier.\n\n![CI pipeline running](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/ci-pipeline-starting.png){: .shadow}\n\nWhen the pipeline completes, click the pipeline ID (_#680073569 in this case_).\n\nInside the pipeline, click the _Jobs_ tab and you should see the coverage for the unit-test job is 8.33%.\n\n![CI pipeline coverage](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/ci-pipeline-coverage.png){: .shadow}\n\nClick the _Tests_ tab and you can see the testing results for the unit-test job. 
\n\n![CI pipeline tests](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/ci-pipeline-tests.png){: .shadow}\n\nClick the name of the job _unit-test_ and you will see the status for each of the test suites run.\n\n![CI pipeline test details](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/ci-pipeline-test-details.png){: .shadow}\n\nCongratulations! You just added automated tests for your React application to your CI pipeline inside of GitLab and output the results to the pipeline.\n\nAll code for this tutorial can be found in this [project](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/react-app).\n\nCover image by [Lautaro Andreani](https://unsplash.com/@lautaroandreani?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/react?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n## Related Posts\n- [The GitLab guide to modern software testing](https://about.gitlab.com/blog/the-gitlab-guide-to-modern-software-testing/)\n- [Unit Test Reports](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html)\n- [coverage keyword](https://docs.gitlab.com/ee/ci/yaml/#coverage)\n",[1158,9,109],{"slug":4948,"featured":6,"template":686},"how-to-automate-testing-for-a-react-application-with-gitlab","content:en-us:blog:how-to-automate-testing-for-a-react-application-with-gitlab.yml","How To Automate Testing For A React Application With 
Gitlab","en-us/blog/how-to-automate-testing-for-a-react-application-with-gitlab.yml","en-us/blog/how-to-automate-testing-for-a-react-application-with-gitlab",{"_path":4954,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4955,"content":4961,"config":4967,"_id":4969,"_type":14,"title":4970,"_source":16,"_file":4971,"_stem":4972,"_extension":19},"/en-us/blog/how-to-build-out-your-devops-team",{"title":4956,"description":4957,"ogTitle":4956,"ogDescription":4957,"noIndex":6,"ogImage":4958,"ogUrl":4959,"ogSiteName":670,"ogType":671,"canonicalUrls":4959,"schema":4960},"How to build out your DevOps team","Hiring the right DevOps roles put you on the path to success. ","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664007/Blog/Hero%20Images/devopsroles.jpg","https://about.gitlab.com/blog/how-to-build-out-your-devops-team","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to build out your DevOps team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Johanna Ambrosio\"}],\n        \"datePublished\": \"2022-01-25\",\n      }",{"title":4956,"description":4957,"authors":4962,"heroImage":4958,"date":4964,"body":4965,"category":769,"tags":4966},[4963],"Johanna Ambrosio","2022-01-25","\nGetting started with modern software development can feel overwhelming, particularly if you're trying to build a DevOps team from scratch. Hiring the right DevOps roles may require a blend of art, science, and luck, but it is doable. Here's our best advice on key DevOps roles, and the skills each position needs to make your DevOps team function like a well-oiled machine.\n\n- **Developers:** DevOps is a team sport nowadays. Devs test code, act as [security champions](/blog/why-security-champions/), provision infrastructure, and write automation scripts… just to name a few of the job requirements. 
They use scrum, Kanban, or other Agile methods to work in short iterations with regular feedback from the business side or from other clients. The dev role has changed dramatically over the past few years and will likely continue to adopt elements of other roles from UX to business-side subject matter expert. They want to continue to stretch themselves, so keep that in mind. In our [2021 Global DevSecOps Survey](/developer-survey/), developers said understanding AI/ML is the most important skill for their future careers.   \n\n- **Operations engineer/systems administrator:** In Olden Times, this is the person who ensured the software could and did run smoothly in production and sent out alarms if it didn't. But on a DevOps team, ops will manage the cloud, help create monitoring and analytics that are integrated into code, manage the tools, deal with the tools, and, of course, help resolve problems. Like the dev role, operations pros need new and emerging skills to stay relevant, including advanced programming languages, subject matter expertise, and a deeper understanding of security, according to our survey.\n\n- **Evangelist:** Someone needs to make sure the rest of the company knows what your team is up to, sing its praises, and communicate what the business's most pressing needs are. Ideally, this is a senior-level person who sits on the company's Executive Committee or board. More than just a cheerleader, an evangelist on a DevOps team should get everyone in the company involved in DevOps, committed to its success, and happy to spend budget on the endeavor. \n\n- **Project manager/release manager:** This DevOps role tracks the team's progress against business objectives, sets goals and timelines, and tries to keep everything running on time. 
Solving problems with cost, project scope, schedule, and client satisfaction are also squarely in this job description.\n\n\n- **QA tester/automation engineer:** A testing professional plays a critical role on a DevOps team, even with the advent of \"devs who test\" and test automation. Testing pros look at the big picture of the entire software pipeline and at snippets of code. From choosing or creating the right tests to driving test automation, this DevOps role needs out-of-the-box thinking, flexibility, and the ability to pivot at a moment's notice. \n\n\n- **Security engineer:** It's critical to build in security and compliance from the start, rather than trying to tack it on at the end when fixing problems becomes most expensive. A security engineer on a DevOps team must be strategic and hands-on. Security has a lingering tarnished reputation as a top-down problem that devs literally don't have the tools to solve, but are asked to. So for this DevOps role, it's critical to hire someone who can meet dev and ops where they are, explain the challenges and technologies, and work together collegially.\n\n- **User experience (UX) professional:** This DevOps role is the end-user advocate, the person who is totally focused on how the software looks and works from the client's perspective. Think of the UX pro as the person who brings the client and the client's needs right into the development process. In this era of modern software development, [a UX role](/blog/the-evolution-of-ux-at-gitlab/) is a must-have rather than a nice-to-have.\n\nThose are just the \"getting started\" DevOps roles. Other titles to consider include a site reliability engineer or a DevOps platform engineer, an infrastructure engineer, project and product managers, systems engineers and architects, and software architects. 
Keep in mind that, especially now with the Great Resignation, [hiring talent for any of these DevOps roles](/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain/), and pretty much anything IT-related in general, can take months.\n\nReskilling is an excellent option, though. The DevOps Institute [offers trainings](https://www.devopsinstitute.com/skilup-days/), which it calls SKILup Days, on topics such as site reliability engineering and how to create a CI/CD pipeline. And when thinking about reskilling, don't forget [the importance of soft skills to a DevOps team](/blog/soft-skills-are-the-key-to-your-devops-career-advancement/). If ever there's a place where collaboration and communication matter, it's in DevOps.\n\n_Johanna Ambrosio is a freelance technology writer._\n\nCover image by Hans-Peter Gauster on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,813,749],{"slug":4968,"featured":6,"template":686},"how-to-build-out-your-devops-team","content:en-us:blog:how-to-build-out-your-devops-team.yml","How To Build Out Your Devops Team","en-us/blog/how-to-build-out-your-devops-team.yml","en-us/blog/how-to-build-out-your-devops-team",{"_path":4974,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4975,"content":4981,"config":4986,"_id":4988,"_type":14,"title":4989,"_source":16,"_file":4990,"_stem":4991,"_extension":19},"/en-us/blog/how-to-build-reusable-ci-templates",{"title":4976,"description":4977,"ogTitle":4976,"ogDescription":4977,"noIndex":6,"ogImage":4978,"ogUrl":4979,"ogSiteName":670,"ogType":671,"canonicalUrls":4979,"schema":4980},"How to build more reusable CI/CD templates","Users can now define inputs to any includable CI/CD templates. 
Learn how and see what other CI/CD pipeline developments are coming.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682709/Blog/Hero%20Images/pexels-mathias-reding-4386148.jpg","https://about.gitlab.com/blog/how-to-build-reusable-ci-templates","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to build more reusable CI/CD templates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2023-05-01\",\n      }",{"title":4976,"description":4977,"authors":4982,"heroImage":4978,"date":4983,"body":4984,"category":791,"tags":4985},[2022],"2023-05-01","\n\nThere are exciting new developments to share about our CI/CD templates features, known for their ability to get users up and running quickly with [GitLab CI/CD](/topics/ci-cd/). Our goals for the immediate future are to evolve templates into CI/CD components (more details below) and, soon, to release a CI/CD components catalog to make the reusing and sharing of pipeline configurations easier and more efficient for developers, both inside of their organizations and with the wider developer community. The first step in our journey is to enable users to define inputs to any includable file, ultimately creating more powerful and reusable CI/CD templates.\n\nHere is a short walkthrough on this capability: \n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"1870\" height=\"937\" src=\"https://www.youtube.com/embed/4ZRdgBy1n5E\" title=\"\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\n\n## Build more robust and isolated CI/CD templates\nIn GitLab 15.11, we released, as Beta, the ability to define inputs to any includable file (any CI/CD file that you include in your pipeline). 
Until now, we've been leveraging environment variables to pass information. As an example, we used environment variables to pass information from an upstream pipeline to a downstream pipeline.\n\nUsing environment variables for passing information is like declaring global variables in programming languages – it has an effect on your entire pipeline, which means that the more variables we declare, the more we risk variable conflicts and increased variable scope.\n\nInput parameters are similar to variables passed to the template but exist only inside a specific scope and don't affect other templates in your pipelines. There are several benefits of using inputs, including:\n1. Inputs are not inherited from upstream includes and must be passed explicitly, which means they will never affect your entire pipeline. \n2. Inputs have full support for CI/CD interpolation, which means you have complete flexibility to \"templatize\" your pipeline and use `$[[ inputs.* ]]` across all keywords in your CI/CD configuration. \n3. You can define mandatory and optional inputs to be used as part of your CI/CD templates.\n4. You can define a default value for inputs. \n \nThis paradigm allows users to build more robust and isolated templates (which will soon evolve into components) and enables users to declare and enforce contracts. \n\n### Add your inputs and let us know what you think! \nThe ability to define inputs to a CI/CD configuration file is available right now and we'd love for users to dive in and begin adding inputs to templates. You can check out [the GitLab docs](https://docs.gitlab.com/ee/ci/yaml/includes.html#define-input-parameters-with-specinputs) and review [this example project](https://gitlab.com/grzesiek/ci-interpolation-example) to better understand how to use inputs as part of your daily workflow. 
If you use this feature and have feedback, please share it with us in this [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/407556).\n\n## What's next in CI/CD pipelines?\nIn GitLab 16.0, we are planning to release an experimental version of a CI/CD pipeline component, which will be the first building block of our CI/CD catalog. A pipeline component is a reusable, single-purpose building block that abstracts a single pipeline configuration unit away. To learn more, please check out this [example project](https://gitlab.com/gitlab-test-ci-catalog/catalog/ruby). \n\n### Why are we moving to components?\nComponents are preconfigured CI/CD files that automate the process of building, testing, and deploying software applications. CI/CD components provide:\n* **Versioning**: Each component is tagged with a version number, so you can reference a specific version or always use the `~latest` version.\n* **Consistency**: CI/CD components ensure consistency in your CI/CD pipelines across different projects, teams, and environments. By using a standardized approach, developers can reduce errors and improve the quality of their code.\n* **Time-savings**: CI/CD components save time by automating repetitive tasks such as running tests, building artifacts, and deploying applications. This enables developers to focus on more important tasks, like writing code and fixing bugs.\n* **Reusability**: CI/CD components can be reused across multiple projects and teams, eliminating the need to create custom scripts for each project. This saves time and reduces the risk of errors.\n* **Scalability**: CI/CD components are scalable and can be used to manage pipeline processes of large and complex applications. This enables developers to easily manage their projects as they grow.\n* **Flexibility**: CI/CD components are highly customizable and can be adapted to suit the needs of different projects, teams, and environments. 
This allows developers to use the tools and processes that work best for them.\n\nTL;DR: Using CI/CD components can help streamline the development process, save time, reduce errors, and improve the quality of code.\n\n### On the horizon: A CI/CD component catalog\nTo further streamline your development processes, improve the quality of your software delivery, and make it easier for developers to discover and use preconfigured components, we’ll be releasing the CI/CD component catalog, which will make using, creating, and sharing CI/CD components much more efficient and user-friendly, and we’re targeting release of this later this year. In the next months, we’ll be sharing more feature updates, blogs, docs, and demos to keep you posted on our journey toward CI/CD components and a CI/CD component catalog. We’re excited for you to test out the new capabilities as they drop, and we look forward to your feedback.  \n\nCover image by [Mathias Reding](https://www.pexels.com/@matreding/) on [Pexels](https://www.pexels.com/photo/background-of-abstract-modern-architectural-pattern-4386148/).\n{: .note}\n",[109,9],{"slug":4987,"featured":6,"template":686},"how-to-build-reusable-ci-templates","content:en-us:blog:how-to-build-reusable-ci-templates.yml","How To Build Reusable Ci Templates","en-us/blog/how-to-build-reusable-ci-templates.yml","en-us/blog/how-to-build-reusable-ci-templates",{"_path":4993,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4994,"content":4999,"config":5004,"_id":5006,"_type":14,"title":5007,"_source":16,"_file":5008,"_stem":5009,"_extension":19},"/en-us/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod",{"title":4995,"description":4996,"ogTitle":4995,"ogDescription":4996,"noIndex":6,"ogImage":3500,"ogUrl":4997,"ogSiteName":670,"ogType":671,"canonicalUrls":4997,"schema":4998},"How to code, build, and deploy from an iPad using GitLab and Gitpod","Senior Developer Evangelist Brendan O'Leary tackles the challenge of doing DevOps 
from a tablet.","https://about.gitlab.com/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to code, build, and deploy from an iPad using GitLab and Gitpod\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-02-10\",\n      }",{"title":4995,"description":4996,"authors":5000,"heroImage":3500,"date":5001,"body":5002,"category":791,"tags":5003},[766],"2022-02-10","\n\nAs a software engineer, it can be tough to go all-in on just using an iPad for your daily driver. So when Apple announced the M1 chip-based iPads, I, along with many techies, got excited to see if we'd finally get things like a proper terminal on the iPad. But that still isn't the use case that the iPad solves. I remained determined to be able to *code* from mine. So I hooked up my magic keyboard and fired up Gitpod to code and GitLab to build and deploy an app from scratch... all from my iPad.\n\n## Getting started\n\nLike any of [my projects](/blog/introducing-auto-breakfast-from-gitlab/), the first thing I needed was inspiration. I had promised my colleague [Pj](https://brendan.fyi/pj) for some time that I would review [his blog](https://brendan.fyi/pj-twitter-blog) on how to make a Twitter bot like all of the fantastic ones he built while breaking into tech. 
Combine the need to learn the Twitter API to provide an excellent review with my love of Elton John's music, and I had it: I'd make a Twitter bot that tweeted every morning at 4:00 am (as an homage to the line in “Someone Saved My Life Tonight”).\n\nArmed with my newfound inspiration, I ran to gitlab.com in Safari (on my iPad, obviously) and created a new, blank GitLab project.\n\n![ipad on desk](https://about.gitlab.com/images/blogimages/brendanipad1.png){: .shadow}\n\n## Coding on the iPad\n\nOnce I had the new project, getting started on Gitpod was as easy as clicking the \"Gitpod\" button on GitLab to open my repository in Gitpod.\n\nGitpod enables you to access an entire development environment from any browser. By default, you get a container with many development tools (Node, Ruby, OpenJDK, etc.). But you can also choose [your own container](https://www.gitpod.io/docs/config-docker) as a starting point with a .gitpod.yml… but we'll talk about that later.\n\nThe environment is presented to you as a VS Code interface – where you can open, edit, and add files just as you'd expect. You can also access the terminal just like you would in VS Code and install anything you might need to get your project running.\n\nIn this example, I decided to build the Twitter bot in Node.js, so I initialized a new Node project and installed the packages I'd need with:\n\n```bash\nnpm init -y\nnpm install express twit node-schedule dotenv\n```\n\n## Running your app\n\nOnce I had some code running – just the [Express sample app](https://expressjs.com/en/starter/hello-world.html) that says Hello World – running the app was just as easy as if I was going to run it on my laptop:\n\n```bash\nnpm dev\n```\nNot only did that run my code to connect to the Twitter API, wait until 4:00 a.m. 
(UTC), and then tweet to let everyone know it was 4:00 a.m., but it also shows this relative to my Express app:\n\n![Express app](https://about.gitlab.com/images/blogimages/brendanipad3.png){: .shadow}\n\nThat allows me to preview my [website for the app](https://brendan.fyi/4oclock) while I'm coding it. This is a massive benefit because it means I can have two tabs open on the iPad – one with Gitpod and my code and another with the website as I change it. Or I can even use split-screen on the iPad to have them side-by-side like I might if I was at my desk at my \"normal\" setup. And there's even a button to make the site available publicly so I could share it with my team and show them what I'm working on (as long as my Gitpod workspace is running).\n\nNow, when it comes to coding the rest of the Twitter bot, I used the previously mentioned [tutorial](https://brendan.fyi/pj-twitter-blog) from my colleague [Pj](https://brendan.fyi/pj). So I won't go into detail on the actual coding of the app – you can find the [code](https://gitlab.com/brendan-demo/4oclock), [website](https://brendan.fyi/4oclock), and [Twitter bot](https://twitter.com/DammitOclock) if you want to learn more about the app itself. But to deploy the website and the bot, I needed something else: [GitLab CI/CD](https://docs.gitlab.com/ee/ci/).\n\n## Deploying the app\n\nCombining GitLab CI/CD and GitLab.com's SaaS offering with Gitpod meant that I could not only code and preview the app from my iPad, but I could also get it deployed to Heroku (or any provider) from the couch. \n\nI created a `.gitlab-ci.yml` file in my project to get started. For deploying to Heroku:\n\n- I like to use a Ruby package called [dpl](https://github.com/travis-ci/dpl) from Travis CI because it makes it a simple one-line command.  Alternatively, I could install the [Heroku CLI](https://devcenter.heroku.com/articles/heroku-cli) and deploy with that if I wanted to. 
\n\n- I added the `HEROKU_API_KEY` variable to my [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-a-project) so that I could authenticate with Heroku for the deployment. \n\n- I then set the `rules:` section to only deploy when commits are impacting the main (default) branch, and I was ready to go! \n\nNow, every time I push code from Gitpod to GitLab, GitLab will start the build and deploy it to Heroku:\n\n```yaml\nimage: starefossen/ruby-node:2-10\n\nvariables:\n APP_NAME: four-oclock-in-the-morning\n\ndeploy:\n stage: deploy\n script:\n - gem install dpl -v 1.10.6\n - dpl --provider=heroku --app=$APP_NAME --api-key=$HEROKU_API_KEY\n rules:\n - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n```\n\n## Enabling collaboration\n\nThere are two other concepts that this pattern introduces that are worth discussion: the idea of one environment per change and enabling new collaborators to spin up a development environment in seconds.\n\nMost developers are used to having our setup just the way we like it – precisely the correct number of monitors, keys on our keyboard, and all of our favorite tools installed. However, that can lead to issues. We already know we should treat our servers like cattle, not pets, so why do we still treat our laptops like pets? While I love my MacBook and the stickers on it as much as the next person, I can get frustrated when setting up a new one and getting it back to the way I like it.\n\nIn addition, on many projects I've been on in the past, onboarding a new developer can take a lot of effort, including getting the correct libraries installed and ensuring they have access to all the right resources and environments. These things may seem trivial, but I've seen it take up to three days from senior engineers just to get another engineer up and running. 
All of that time is time that could be much better spent on writing code for the actual business.\n\nGitpod solves both of these issues with a simple YAML file: `.gitpod.yml`. This file allows you to specify:\n\n- What image to use as the base for the environment\n- Which other tools to install\n- What commands to run at startup, and even things like which VSCode extensions you should have in the environment\n\nAnd [lots of different settings](https://www.gitpod.io/docs/references/gitpod-yml) that you can find in the [Gitpod docs](https://www.gitpod.io/docs).\n\nSpecifying all of the tools needed lets you have short-lived environments that you can spin up for one task and then discard and get a fresh one for the next task. And it also saves time when onboarding new engineers by guaranteeing they have a running system within just a few seconds of opening the project. Best of all, it is all in a file that's in source control, so as things change or you make improvements to the development environment, all of your developers benefit from it immediately.  \n\nI added a simple [`.gitpod.yml`](https://gitlab.com/brendan-demo/4oclock/-/blob/main/.gitpod.yml) to run `npm run dev` to get started when you create a new environment. That simple example is great for a simple Node app or similar, but what about something more complex? Gitpod works for that, too. GitLab itself has a [`gitpod.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitpod.yml) that lets you get an entire working GitLab development environment – and all that entails – up and running quickly, without the need to install Postgres and Redis and all of the other dependencies GitLab has.\n\nThis makes contributing to GitLab easier than ever. Just go to the [GitLab repository](https://brendan.fyi/gitlab-repo) and click on that Gitpod button to get started. 
I'd love to hear how it works for you!\n",[9,978,749],{"slug":5005,"featured":6,"template":686},"how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod","content:en-us:blog:how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod.yml","How To Code Build And Deploy From An Ipad Using Gitlab And Gitpod","en-us/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod.yml","en-us/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod",{"_path":5011,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5012,"content":5018,"config":5023,"_id":5025,"_type":14,"title":5026,"_source":16,"_file":5027,"_stem":5028,"_extension":19},"/en-us/blog/how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd",{"title":5013,"description":5014,"ogTitle":5013,"ogDescription":5014,"noIndex":6,"ogImage":5015,"ogUrl":5016,"ogSiteName":670,"ogType":671,"canonicalUrls":5016,"schema":5017},"How to continuously test web apps and APIs with Hurl and GitLab CI/CD","Hurl as a CLI tool can be integrated into the DevSecOps platform to continuously verify, test, and monitor targets. It also offers integrated unit test reports in GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659883/Blog/Hero%20Images/post-cover-image.jpg","https://about.gitlab.com/blog/how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to continuously test web apps and APIs with Hurl and GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2022-12-14\",\n      }",{"title":5013,"description":5014,"authors":5019,"heroImage":5015,"date":5020,"body":5021,"category":791,"tags":5022},[2473],"2022-12-14","\nTesting websites, web applications, or generally everything reachable with the HTTP protocol, can be a challenging exercise. 
Thanks to tools like `curl` and `jq`, [DevOps workflows have become more productive](/blog/devops-workflows-json-format-jq-ci-cd-lint/) and even simple monitoring tasks can be automated with CI/CD pipeline schedules. Sometimes, use cases require specialized tooling with custom HTTP headers, parsing expected responses, and building end-to-end test pipelines. Stressful incidents also need good and fast tools that help analyze the root cause and quickly mitigate and fix problems.\n\n[Hurl](https://hurl.dev) is an open-source project developed and maintained by Orange, and uses libcurl from curl to provide HTTP test capabilities. It aims to tackle complex HTTP test challenges by providing a simple plain text configuration to describe HTTP requests. It can chain requests, capture values, and evaluate queries on headers and body responses. So far, so good: Hurl does not only support fetching data, it can be used to test HTTP sessions and XML (SOAP) and JSON (REST) APIs.\n\n## Getting Started\n\nHurl comes in various package formats to [install](https://hurl.dev/docs/installation.html). On macOS, a Homebrew package is available.\n\n```sh\n$ brew install hurl\n```\n\n## First steps with Hurl\n\nHurl proposes to start with the configuration file format first, which is a great way to learn the syntax step by step. 
The following example creates a new `gitlab-contribute.hurl` configuration file that will do two things: execute a GET HTTP request on `https://about.gitlab.com/community/contribute/` and check whether its HTTP response contains the HTTP protocol `2` and status code `200` (OK).\n\n```sh\n$ vim gitlab-contribute.hurl\n\nGET https://about.gitlab.com/community/contribute/\n\nHTTP/2 200\n$ hurl --test gitlab-contribute.hurl\ngitlab-contribute.hurl: Running [1/1]\ngitlab-contribute.hurl: Success (1 request(s) in 413 ms)\n--------------------------------------------------------------------------------\nExecuted files:  1\nSucceeded files: 1 (100.0%)\nFailed files:    0 (0.0%)\nDuration:        415 ms\n```\n\nInstead of creating configuration files, you can also use the `echo “...” | hurl` command pattern. The following command tests against about.gitlab.com and checks whether the HTTP response protocol is 1.1 and the status is OK (200). The two newline characters `\\n` are required for separation.\n\n```sh\n$ echo \"GET https://about.gitlab.com\\n\\nHTTP/1.1 200\" | hurl --test\n```\n\n![hurl CLI run against about.gitlab.com, failed request](https://about.gitlab.com/images/blogimages/hurl-continuous-website-testing/hurl_assert_failure.png)\n\nThe command failed, and it says that the response protocol version is actually `2`. Let's adjust the test run to expect `HTTP/2`:\n\n```sh\necho \"GET https://about.gitlab.com\\n\\nHTTP/2 200\" | hurl --test\n```\n## Asserting HTTP responses\n\nHurl allows defining [assertions](https://hurl.dev/docs/asserting-response.html) to control when the tests fail. 
These can be defined for different HTTP response types:\n\n- Expected HTTP protocol version and status\n- Headers\n- Body\n\nThe configuration language allows users to define queries with predicates that allow to compare, chain, and execute different assertions.\n\nThis is the easiest way to verify that the HTTP response contains what is expected to be a string or sentence on the website, for example. If the string does not exist, this can indicate that it was changed unexpectedly, or that the website is down. Let's revisit the example with testing GET https://about.gitlab.com/community/contribute/ and add an expected string `Everyone can contribute` as a new assertion, `body contains \u003Cstring>` is the expected configuration syntax for [body asserts](https://hurl.dev/docs/asserting-response.html#body-assert).\n\n```sh\n$ vim gitlab-contribute.hurl\n\nGET https://about.gitlab.com/community/contribute/\n\nHTTP/2 200\n\n[Asserts]\nbody contains \"Everyone should contribute\"\n\n$ hurl --test gitlab-contribute.hurl\n```\n\n**Exercise:** Fix the test by updating the asserts line to `Everyone can contribute` and run Hurl again.\n\n### Asserting responses: JSON and XML\n\n[JSONPath](https://hurl.dev/docs/asserting-response.html#jsonpath-assert) automatically parses the JSON response (a built-in `jq with curl` parser so to speak), and allows users to compare the value to verify the asserts (more below). The XML format can be found in an [RSS feed on about.gitlab.com](https://about.gitlab.com/atom.xml) and parsed using [XPath](https://hurl.dev/docs/asserting-response.html#xpath-assert). 
The following example from `atom.xml` should be verified with Hurl:\n\n```xml\n\u003Cfeed xmlns=\"http://www.w3.org/2005/Atom\">\n\u003Ctitle>GitLab\u003C/title>\n\u003Cid>https://about.gitlab.com/blog\u003C/id>\n\u003Clink href=\"https://about.gitlab.com/blog/\"/>\n\u003Cupdated>2022-11-21T00:00:00+00:00\u003C/updated>\n\u003Cauthor>\n\u003Cname>The GitLab Team\u003C/name>\n\u003C/author>\n\u003Centry>\n...\n\u003C/entry>\n\u003Centry>\n...\n\u003C/entry>\n\u003Centry>\n…\n```\n\nIt is important to note that XML namespaces need to be specified for parsing. Hurl allows users to replace the first default namespace with the `_` character to avoid adding `http://www.w3.org/2005/Atom` everywhere, the XPath is now shorter with `string(//_:feed/_:entry)` to get a list of all entries. This value is captured in the `entries` variable, which can be compared to match a specific string, `GitLab` in this example. Additionally, the feed id and author name is checked.\n\n```\n$ vim gitlab-rss.hurl\n\nGET https://about.gitlab.com/atom.xml\n\nHTTP/2 200\n\n[Captures]\nentries: xpath \"string(//_:feed/_:entry)\"\n\n[Asserts]\nvariable \"entries\" matches \"GitLab\"\n\nxpath \"string(//_:feed/_:id)\" == \"https://about.gitlab.com/blog\"\nxpath \"string(//_:feed/_:author/_:name)\" == \"The GitLab Team\"\n\n$ hurl --test gitlab-rss.hurl\n```\n\nHurl allows users to capture the value from responses into [variables](https://hurl.dev/docs/templates.html#variables) that can be used later. 
This method can also be helpful to model end-to-end testing workflows: First, check the website health status and retrieve a CSRF token, and then try to log into the website by sending the token again.\n\nREST APIs that are expected to always return a specified field, or monitoring a website health state [becomes a breeze using Hurl](https://hurl.dev/docs/tutorial/chaining-requests.html#test-rest-api).\n\n## Use Hurl in GitLab CI/CD jobs\n\nThe easiest way to integrate Hurl into GitLab CI/CD is to use the official container image. The Hurl project provides a [container image on Docker Hub](https://hub.docker.com/r/orangeopensource/hurl), which did not work in CI/CD at first glance. After talking with the maintainers, the [entrypoint override](https://docs.gitlab.com/ee/ci/docker/using_docker_images.html#override-the-entrypoint-of-an-image) was identified as a solution for using the image in GitLab CI/CD. Note that the Alpine based image uses the libcurl library that does not support HTTP/2 yet - the test results are different to a Debian base image (follow [this issue report](https://github.com/Orange-OpenSource/hurl/issues/1082) for the problem analysis).\n\nThe following example is kept short to run the container image, override the entrypoint, and run Hurl with passing in the test using the `echo` CLI command.\n\n```yaml\nhurl-standalone:\n  image:\n    name: ghcr.io/orange-opensource/hurl:latest\n    entrypoint: [\"\"]\n  script:\n    - echo -e \"GET https://about.gitlab.com/community/contribute/\\n\\nHTTP/1.1 200\" | hurl --test --color\n```\n\nThe Hurl test report is printed into the CI/CD job trace log, and returns successfully.\n\n```sh\n$ echo -e \"GET https://about.gitlab.com/community/contribute/\\n\\nHTTP/1.1 200\" | hurl --test --color\n-: Running [1/1]\n-: Success (1 request(s) in 280 ms)\n--------------------------------------------------------------------------------\nExecuted files:  1\nSucceeded files: 1 (100.0%)\nFailed files:    0 
(0.0%)\nDuration:        283 ms\nCleaning up project directory and file based variables\n00:00\nJob succeeded\n```\n\nThe next iteration is to create a CI/CD job template that provides generic attributes, and allows users to dynamically run the job with an environment variable called `HURL_URL`.\n\n```yaml\n# Hurl job template\n.hurl-tmpl:\n  # Use the upstream container image and override the ENTRYPOINT to run CI/CD script\n  # https://docs.gitlab.com/ee/ci/docker/using_docker_images.html#override-the-entrypoint-of-an-image\n  image:\n    name: ghcr.io/orange-opensource/hurl:1.8.0\n    entrypoint: [\"\"]\n  variables:\n    HURL_URL: \"about.gitlab.com/community/contribute/\"\n  script:\n    - echo -e \"GET https://${HURL_URL}\\n\\nHTTP/1.1 200\" | hurl --test --color\n\nhurl-about-gitlab-com:\n  extends: .hurl-tmpl\n  variables:\n    HURL_URL: \"about.gitlab.com/jobs/\"\n```\n\nRunning GET commands with expected HTTP results is not the only use case, and the Hurl maintainers thought about this already. The next section explains how to create a custom container image; you can skip to the [DevSecOps workflows](#devSecOps-workflows-with-hurl) section to learn more about efficient Hurl configuration use cases.\n\n### Custom container image with Hurl\n\nMaintaining and building a custom container image adds more work, but also helps with avoiding running unknown container images in CI/CD pipelines. The latter is often a requirement for compliance and security. _Since the Hurl Debian package supports detecting HTTP/2 as a protocol, this blog post will focus on building a custom image, and run all tests using this image. If you plan on using the upstream container image, make sure to review the test configuration for the HTTP protocol version detection._\n\nThe Hurl documentation provides multiple ways to install Hurl. For this example, Debian 11 Bullseye (slim) is used. 
Hurl comes with a package dependency on `libxml2` which can either be installed manually with then running the `dpkg` command, or by using `apt install` to install a local package and automatically resolve the dependencies.\n\nThe following CI/CD example uses a job template which defines the Hurl version as environment variable to avoid repetitive use, and downloads and installs the Hurl Debian package. The `hurl-gitlab-com` job extends the CI/CD job template and runs a one-line test against `https://gitlab.com` and expects to return `HTTP/2` as HTTP protocol version, and `200` as status.\n\n```yaml\n# CI/CD job template\n.hurl-tmpl:\n  variables:\n    HURL_VERSION: 1.8.0\n  before_script:\n    - DEBIAN_FRONTEND=noninteractive apt update && apt -y install jq curl ca-certificates\n    - curl -LO \"https://github.com/Orange-OpenSource/hurl/releases/download/${HURL_VERSION}/hurl_${HURL_VERSION}_amd64.deb\"\n    - DEBIAN_FRONTEND=noninteractive apt -y install \"./hurl_${HURL_VERSION}_amd64.deb\"\n\nhurl-gitlab-com:\n  extends: .hurl-tmpl\n  script:\n    - echo -e \"GET https://gitlab.com\\n\\nHTTP/2 200\" | hurl --test --color\n```\n\nThe next section describes how to optimize the CI/CD pipelines for more efficient schedules and runs to monitor websites and not waste too many resources and CI/CD minutes. You can also skip it and [scroll down to more advanced Hurl examples in GitLab CI/CD](#devsecops-workflows-with-hurl).\n\n### CI/CD efficiency: Hurl container image\n\nThe installation steps for Hurl, and its dependencies, can waste resources and increase the pipeline job runtime every time. To make the CI/CD pipelines more efficient, we want to use a container image that already provides Hurl pre-installed. The following steps are required for creating a container image:\n\n- Use Debian 11 Slim (FROM).\n- Install dependencies to download Hurl (`curl`, `ca-certificates`). 
`jq` is installed for convenience to access it from CI/CD commands when needed later.\n- Download the Hurl Debian package, and use `apt install` to install its dependencies automatically.\n- Clear the apt lists cache to enforce apt update again, and avoid security issues.\n- Hurl is installed into the PATH, specify the default command being run. This allows running the container without having to specify a command.\n\nThe steps to install the packages are separated for better readability; an optimization for the `docker-build` job can happen by chaining the `RUN` commands into one long command.\n\n`Dockerfile`\n```\nFROM debian:11-slim\n\nENV DEBIAN_FRONTEND noninteractive\n\nARG HURL_VERSION=1.8.0\n\nRUN apt update && apt install -y curl jq ca-certificates\nRUN curl -LO \"https://github.com/Orange-OpenSource/hurl/releases/download/${HURL_VERSION}/hurl_${HURL_VERSION}_amd64.deb\"\n# Use apt install to determine package dependencies instead of dpkg\nRUN apt -y install \"./hurl_${HURL_VERSION}_amd64.deb\"\nRUN rm -rf /var/lib/apt/lists/*\n\nCMD [\"hurl\"]\n```\n\nNote that the `HURL_VERSION` variable can be overridden by passing the variable and value into the container build job later. It is intentionally not using an automated script that always uses the [latest release](https://github.com/Orange-OpenSource/hurl/releases) to avoid breaking the behavior, and enforces a controlled upgrade cycle for container images in production.\n\nOn GitLab.com SaaS, you can include the `Docker.gitlab-ci.yml` CI/CD template which will automatically detect the `Dockerfile` file and start building the image using the shared runners, and push it to the [GitLab container registry](https://docs.gitlab.com/ee/user/packages/container_registry/). 
For self-managed instances or own runners on GitLab.com SaaS, it is recommended to decide whether to use and setup [Docker-in-Docker](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html) or [Kaniko](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html), Podman, or other container image build tools.\n\n```yaml\ninclude:\n  - template: Docker.gitlab-ci.yml\n```\n\nTo avoid running the Docker image build job every time, the job override definition specifies to [run it manually](https://docs.gitlab.com/ee/ci/yaml/#when). You can also use rules to [choose when to run the job](https://docs.gitlab.com/ee/ci/jobs/job_control.html), only when a Git tag is pushed for example.\n\n```yaml\ninclude:\n  - template: Docker.gitlab-ci.yml\n\n# Change Docker build to manual non-blocking\ndocker-build:\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'\n      when: manual\n      allow_failure: true\n```\n\nOnce the container image is pushed to the registry, navigate into `Packages and Registries > Container Registries` and inspect the tagged image. Copy the image path for the latest tagged version and use it for the `image` attribute in the CI/CD job configuration.\n\n### Hurl container image in GitLab CI/CD example\n\nThe full example uses the previously built container image, and specifies the default `HURL_URL` variable. 
This can later be overridden by job definitions.\n\n_Please note that the image URL `registry.gitlab.com/everyonecancontribute/dev/hurl-playground:latest` is only used for demo purposes and not actively maintained or updated._\n\n```yaml\ninclude:\n  - template: Docker.gitlab-ci.yml\n\n# Change Docker build to manual non-blocking\ndocker-build:\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'\n      when: manual\n      allow_failure: true\n\n# Hurl job template\n.hurl-tmpl:\n  image: registry.gitlab.com/everyonecancontribute/dev/hurl-playground:latest\n  variables:\n    HURL_URL: gitlab.com\n\n# Hurl jobs that check websites\nhurl-dnsmichi-at:\n  extends: .hurl-tmpl\n  variables:\n    HURL_URL: dnsmichi.at\n  script:\n    - echo -e \"GET https://${HURL_URL}\\n\\nHTTP/1.1 200\" | hurl --test --color\n\nhurl-opsindev-news:\n  extends: .hurl-tmpl\n  variables:\n    HURL_URL: opsindev.news\n  script:\n    - echo -e \"GET https://${HURL_URL}\\n\\nHTTP/2 200\" | hurl --test --color\n```\n\nThe CI/CD configuration can further be optimized:\n\n- Create job templates that execute the same scripts and only differ in the `HURL_URL` variable.\n- Use Hurl configuration files that allow specifying variables on the CLI or as environment variables. More on this in the next section.\n\n## DevSecOps workflows with Hurl\n\nHurl allows users to describe HTTP instructions in a configuration file with the `.hurl` suffix. You can add the configuration files to Git, and review and approve changes in merge requests - with the changes run in CI/CD and reporting back any failures before merging.\n\nInspect the `use-cases/` directory in the [example project](https://gitlab.com/everyonecancontribute/dev/hurl-playground), and fork it to make changes and commit and run the CI/CD pipelines and reports. 
You can also clone the project and run the `tree` command in the terminal.\n\n```sh\n$ tree use-cases\nuse-cases\n├── dnsmichi.at.hurl\n├── gitlab-com-api.hurl\n├── gitlab-contribute.hurl\n└── hackernews.hurl\n```\n\nHurl supports the glob option which collects all configuration files matching a specific pattern.\n\n![Hurl configuration file run](https://about.gitlab.com/images/blogimages/hurl-continuous-website-testing/hurl_multiple_config_files_run.png)\n\n### Chaining requests\n\nSimilar to CI/CD pipelines, jobs, and stages, testing HTTP endpoints with Hurl can require multiple steps. First, ping the website for being reachable, and then try parsing expected results. Separating the requirements into two steps helps to analyze errors.\n\n- HTTP endpoint reachable, but expected string not in response - static website was changed, REST API misses a field, etc.\n- HTTP endpoint is unreachable, don’t try to understand why the follow-up tests fail.\n\nThe following example first sends a ping probe to the dev instance, and a check towards the production environment in the second request.\n\n```sh\n$ vim use-cases/everyonecancontribute-com.hurl\n\nGET https://everyonecancontribute.dev\n\nHTTP/2 200\n\nGET https://everyonecancontribute.com\n\nHTTP/2 200\n$ hurl --test use-cases/everyonecancontribute-com.hurl\n```\n\nIn this scenario, the TLS certificate of the dev instance expired, and Hurl halts the test immediately.\n\n![Hurl chained requests, failing the first test with TLS certificate problems](https://about.gitlab.com/images/blogimages/hurl-continuous-website-testing/hurl_chained_request_fail.png)\n\n### Hurl reports as JUnit test reports\n\nTreat website monitoring and web app tests as unit and end-to-end tests. 
The Hurl developers thought of that too - the CLI command provides different output options for the report: `--report-junit \u003Coutputpath>` integrates with [GitLab JUnit report](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html) support into merge requests and pipeline views.\n\nThe following configuration generates a JUnit report file into the value of the `HURL_JUNIT_REPORT` variable. It exists to avoid typing the path three times. The Hurl tests are run from the `use-cases/` directory using a glob pattern.\n\n```yaml\n# Hurl job template\n.hurl-tmpl:\n    image: registry.gitlab.com/everyonecancontribute/dev/hurl-playground:latest\n    variables:\n        HURL_URL: gitlab.com\n        HURL_JUNIT_REPORT: hurl_junit_report.xml\n\n# Hurl tests from configuration file, generating JUnit report integration in GitLab CI/CD\nhurl-report:\n    extends: .hurl-tmpl\n    script:\n      - hurl --test use-cases/*.hurl --report-junit $HURL_JUNIT_REPORT\n    after_script:\n      # Hack: Workaround for 'id' instead of 'name' in JUnit report from Hurl. https://gitlab.com/gitlab-org/gitlab/-/issues/299086\n      - sed -i 's/id/name/g' $HURL_JUNIT_REPORT\n    artifacts:\n      when: always\n      paths:\n        - $HURL_JUNIT_REPORT\n      reports:\n        junit: $HURL_JUNIT_REPORT\n```\n\nThe JUnit format returned by Hurl 1.8.0 defines the `id` attribute, but the GitLab JUnit integration expects the `name` attribute to be present. While writing this blog post, [the problem was discussed](https://github.com/Orange-OpenSource/hurl/issues/1067#issuecomment-1343264751) with the maintainers, and [the `name` attribute was implemented](https://github.com/Orange-OpenSource/hurl/issues/1078) and will be available in future releases. 
As a workaround with Hurl 1.8.0, the CI/CD [after_script](https://docs.gitlab.com/ee/ci/yaml/#after_script) section uses `sed` to replace the attributes after generating the report.\n\nThe [following example](https://gitlab.com/everyonecancontribute/dev/hurl-playground/-/merge_requests/10) fails on purpose with checking a different HTTP protocol version.\n\n```\nGET https://opsindev.news\n\n# This will fail on purpose\nHTTP/1.1 200\n\n[Asserts]\nbody contains \"Michael Friedrich\"\n```\n\n![Hurl test report in JUnit format integrated into GitLab](https://about.gitlab.com/images/blogimages/hurl-continuous-website-testing/hurl_gitlab_junit_integration_merge_request_widget_overlay.png)\n\nOnce the JUnit integration with Hurl tests from a glob pattern work, you can continue adding new `.hurl` configuration files to the GitLab repository and start testing in MRs, which will require review and approval workflows for production then.\n\n### Web review apps\n\nWebsite monitoring is only one aspect of using Hurl: Testing web applications deployed in review environments in the cloud, and in cloud-native clusters provides a native integration into [DevSecOps](https://about.gitlab.com/topics/devsecops/) workflows. The CI/CD pipelines will fail when Hurl tests are failing, and more insights are provided using merge request widgets reports.\n\n[Cloud Seed](https://docs.gitlab.com/ee/cloud_seed/index.html) provides the ability to deploy a web application to a major cloud provider, for example Google Cloud. After the deployment is successful, additional CI/CD jobs can be configured that verify that the deployed web app version does not introduce a regression, and provides all required data elements, API endpoints, etc. A similar workflow can be achieved by using review app environments with [webservers (Nginx, etc.), Docker, AWS, and Kubernetes](https://docs.gitlab.com/ee/ci/review_apps/#review-apps-examples). 
The review app [environment URL](https://docs.gitlab.com/ee/ci/environments/#create-a-dynamic-environment) is important for instrumenting the Hurl tests dynamically. The CI/CD variable [`CI_ENVIRONMENT_URL`](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) is available when `environment:url` is specified in the review app configuration.\n\nThe following example tests the review app for [this blog post when written in a merge request](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/115548):\n\n```yaml\n# Test review apps with hurl for this blog post.\nhurl-review-test:\n  extends: .review-environment # inherits the environment settings\n  needs: [uncategorized-build-and-review-deploy] # waits until the website (sites/uncategorized) is deployed\n  stage: test\n  rules: # YAML anchor that runs the job only on merge requests\n    - \u003C\u003C: *if-merge-request-original-repo\n  image:\n    name: ghcr.io/orange-opensource/hurl:1.8.0\n    entrypoint: [\"\"]\n  script:\n    - echo -e \"GET ${CI_ENVIRONMENT_URL}\\n\\nHTTP/1.1 200\" | hurl --test --color\n```\n\nThe environment is specified in the [.review-environment job template](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/91d6fd72a424a3d913e79ebc2aefb23bbab85863/.gitlab-ci.yml#L332) and used to [deploy the website review job](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/91d6fd72a424a3d913e79ebc2aefb23bbab85863/.gitlab-ci.yml#L532). The relevant configuration snippet is shown here:\n\n```yaml\n.review-environment:\n  variables:\n    DEPLOY_TYPE: review\n  environment:\n    name: review/$CI_COMMIT_REF_SLUG\n    url: https://$CI_COMMIT_REF_SLUG.about.gitlab-review.app\n    on_stop: review-stop\n    auto_stop_in: 30 days\n```\n\nThe deployment of the www-gitlab-com project [uses buckets in Google Cloud](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/91d6fd72a424a3d913e79ebc2aefb23bbab85863/scripts/deploy) that serve the website content in the review app. 
There are different types of web applications that require different deployment methods - as long as the environment URL variable is available in CI/CD and the deployment URL is accessible from the GitLab Runner executing the CI/CD job, you can continuously test web apps with Hurl!\n\n![Hurl test in GitLab CI/CD for review app environments](https://about.gitlab.com/images/blogimages/hurl-continuous-website-testing/hurl_gitlab_cicd_review_app_environment_tests_www-gitlab-com.png)\n\n## Development tips\n\nUse the [`--verbose` parameter](https://hurl.dev/docs/tutorial/debug-tips.html) to see the full request and response flow. Hurl also provides tips which `curl` command could be run to fetch more data. This can be helpful when starting to use or develop a new REST API, or learning to understand the JSON structure of HTTP responses. Chaining the `curl` command with `jq` (the `curl ... | jq` pattern) can still be helpful to fetch data, and build the HTTP tests in a second terminal or editor window.\n\n```sh\n$ curl -s 'https://gitlab.com/api/v4/projects' | jq\n$ curl -s 'https://gitlab.com/api/v4/projects' | jq -c '.[]' | jq\n\n{\"id\":41375401,\"description\":\"An example project for a GitLab pipeline.\",\"name\":\"Calculator\",\"name_with_namespace\":\"Iva Tee / Calculator\",\"path\":\"calculator\",\"path_with_namespace\":\"snufkins_hat/calculator\",\"created_at\":\"2022-11-26T00:32:33.825Z\",\"default_branch\":\"master\",\"tag_list\":[],\"topics\":[],\"ssh_url_to_repo\":\"git@gitlab.com:snufkins_hat/calculator.git\",\"http_url_to_repo\":\"https://gitlab.com/snufkins_hat/calculator.git\",\"web_url\":\"https://gitlab.com/snufkins_hat/calculator\",\"readme_url\":\"https://gitlab.com/snufkins_hat/calculator/-/blob/master/README.md\",\"avatar_url\":null,\"forks_count\":0,\"star_count\":0,\"last_activity_at\":\"2022-11-26T00:32:33.825Z\",\"namespace\":{\"id\":58849237,\"name\":\"Iva 
Tee\",\"path\":\"snufkins_hat\",\"kind\":\"user\",\"full_path\":\"snufkins_hat\",\"parent_id\":null,\"avatar_url\":\"https://secure.gravatar.com/avatar/a3efe834950275380d5f19c9b17c922c?s=80&d=identicon\",\"web_url\":\"https://gitlab.com/snufkins_hat\"}}\n```\n\nThe GitLab projects API returns an array of elements, where we can inspect the `id` and `name` attributes for a simple test - the first element’s name must not be empty, the second element’s id needs to be greater than 0.\n\n```sh\n$ vim gitlab-com-api.hurl\n\nGET https://gitlab.com/api/v4/projects\n\nHTTP/2 200\n\n[Asserts]\njsonpath \"$[0].name\" != \"\"\njsonpath \"$[1].id\" > 0\n\n$ hurl --test gitlab-com-api.hurl\n\ngitlab-com-api.hurl: Running [1/1]\ngitlab-com-api.hurl: Success (1 request(s) in 728 ms)\n--------------------------------------------------------------------------------\nExecuted files:  1\nSucceeded files: 1 (100.0%)\nFailed files:    0 (0.0%)\nDuration:        730 ms\n```\n\n## More use cases\n\n- Work with HTTP sessions and [cookies](https://hurl.dev/docs/request.html#cookies), test [forms with parameters](https://hurl.dev/docs/request.html#form-parameters).\n- Review existing API tests of your applications.\n- Build advanced chained workflows with GET, POST, PUT, DELETE, and more HTTP methods.\n- Integrate simple ping/HTTP monitoring health checks into the DevSecOps Platform using alerts and incident management.\n\nIf the Hurl checks cannot be integrated directly inside the project where the application is developed and deployed, another idea could be to create a standalone GitLab project that has CI/CD pipeline schedules enabled. 
It can continuously run the Hurl tests, and parse the reports or trigger an event when the pipeline is failing, and [create an alert](https://docs.gitlab.com/ee/operations/incident_management/alerts.html) by sending a JSON payload from the Hurl results to the [HTTP endpoint](https://docs.gitlab.com/ee/operations/incident_management/integrations.html#single-http-endpoint). Developers can send MRs to update the Hurl tests, and maintainers review and approve the new test suites being rolled out into production. Alternatively, move the complete CI/CD configuration into a group/project with different permissions, and specify the CI/CD configuration as remote URL in the web application project. This compliance level helps to control who can make changes to important tests and CI/CD configuration.\n\nHurl supports `--json` as parameter to only return the JSON formatted test result and build own custom reports and integrations.\n\n```sh\n$ echo -e \"GET https://about.gitlab.com/teamops/\\n\\nHTTP/2 200\" | hurl --json | jq\n```\n\nFor folks in DevRel, monitoring certain websites for keywords or checking APIs whether values increase a certain threshold can be interesting. Here is an example for monitoring Hacker News using the Algolia search API, inspired by the [Zapier integration used for GitLab Slack](/handbook/marketing/developer-relations/workflows-tools/zapier/#zaps-for-hacker-news). The `QueryStringParams` section allows users to define the query parameters as a readable list, which is easier to modify. 
The `jsonpath` check searches for the `hits` key and its count being zero (not on the Hacker News front page means OK in this example).\n\n```\n$ vim hackernews.hurl\n\nGET https://hn.algolia.com/api/v1/search\n[QueryStringParams]\nquery: gitlab\n#query: hurl\ntags: front_page\n\nHTTP/2 200\n\n[Asserts]\njsonpath \"$.hits\" count == 0\n\n$ hurl --test hackernews.hurl\n```\n\n## Limitations\n\nHurl works great for testing websites and web applications that serve static content, and by sending different HTTP request types, data, etc., and ensuring that responses match expectations. Compared to other end-to-end testing solutions (Selenium, etc.), Hurl does not provide a JavaScript engine and only can parse the raw DOM or JSON response. It does not support a DOM managed and rendered by JavaScript front-end frameworks. UI integration tests also need to be performed with different tools, similar to full end-to-end test workflows. Other examples are [accessibility testing](https://docs.gitlab.com/ee/ci/testing/accessibility_testing.html) and [browser performance testing](https://docs.gitlab.com/ee/ci/testing/browser_performance_testing.html). If you are curious how end-to-end testing is done for GitLab, the product, peek into the [development documentation](https://docs.gitlab.com/ee/development/testing_guide/end_to_end/).\n\n## Conclusion\n\nHurl provides an easy way to test HTTP endpoints (such as websites and APIs) in a fast and reliable way. The CLI commands can be integrated into CI/CD workflows, and the configuration syntax and files provide a single source of truth for everything. Additional support for JUnit report formats ensure that website testing is fully integrated into the [DevSecOps](https://about.gitlab.com/topics/devsecops/) platform, and increases visibility and extensibility with automating tests, and monitoring. 
There are known limitations with dynamic JavaScript websites and advanced UI/end-to-end testing workflows.\n\nHurl is open source, [created and maintained by Orange](https://opensource.orange.com/en/open-source-orange/), and written in Rust. This blog post inspired contributions to the [Debian/Ubuntu installation documentation](https://github.com/Orange-OpenSource/hurl/pull/1084) and [default issue templates](https://github.com/Orange-OpenSource/hurl/pull/1083).\n\n**Tip:** Practice using Hurl on the command line, and remember it when the next production incident shows a strange API behavior with POST requests.\n\nThanks to [Lee Tickett](/company/team/#leetickett-gitlab) who inspired me to test Hurl in GitLab CI/CD and write this blog post after seeing huge interest in a [Twitter share](https://twitter.com/dnsmichi/status/1595820546062778369).\n\nCover image by [Aaron Burden](https://unsplash.com/@aaronburden) on [Unsplash](https://unsplash.com)\n{: .note}\n",[1158,976,9],{"slug":5024,"featured":6,"template":686},"how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd","content:en-us:blog:how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd.yml","How To Continously Test Web Apps Apis With Hurl And Gitlab Ci Cd","en-us/blog/how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd.yml","en-us/blog/how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd",{"_path":5030,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5031,"content":5037,"config":5042,"_id":5044,"_type":14,"title":5045,"_source":16,"_file":5046,"_stem":5047,"_extension":19},"/en-us/blog/how-to-deploy-react-to-amazon-s3",{"title":5032,"description":5033,"ogTitle":5032,"ogDescription":5033,"noIndex":6,"ogImage":5034,"ogUrl":5035,"ogSiteName":670,"ogType":671,"canonicalUrls":5035,"schema":5036},"How to deploy a React application to Amazon S3 using GitLab CI/CD","Follow this guide to use OpenID Connect to connect to AWS and deploy a React application to Amazon 
S3.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663291/Blog/Hero%20Images/cover1.jpg","https://about.gitlab.com/blog/how-to-deploy-react-to-amazon-s3","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy a React application to Amazon S3 using GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeremy Wagner\"}],\n        \"datePublished\": \"2023-03-01\",\n      }",{"title":5032,"description":5033,"authors":5038,"heroImage":5034,"date":5039,"body":5040,"category":791,"tags":5041},[4943],"2023-03-01","\n\nAmazon S3 has a Static Website Hosting feature which allows you to host a static website directly from an S3 bucket. When you \nhost your website on S3, your website content is stored in the S3 bucket and served directly to your users, without the need \nfor additional resources. Combine this with Amazon CloudFront and you will have a cost-effective and scalable solution for \nhosting static websites – making it a popular choice for single-page applications.\n\nIn this post, I will walk you through setting up your Amazon S3 bucket, setting up OpenID Connect ([OIDC](https://openid.net/connect/)) in AWS, and deploying your application \nto your Amazon S3 bucket using a GitLab [CI/CD](/topics/ci-cd/) pipeline.\n\nBy the end of this post, you will have a [CI/CD pipeline](/blog/how-to-keep-up-with-ci-cd-best-practices/) built in GitLab that automatically deploys to your Amazon S3 bucket. 
Let's dive in.\n\n## Prerequisites\n\nFor this guide you will need the following:\n\n- [Node.js](https://nodejs.org/en/) >= 14.0.0 and npm >= 5.6 installed on your system\n- [Git](https://git-scm.com/) installed on your system\n- A [GitLab](https://gitlab.com/-/trial_registrations/new) account\n- An [AWS](https://aws.amazon.com/free/) account\n\n[A previous tutorial](/blog/how-to-automate-testing-for-a-react-application-with-gitlab/) demonstrated how to create a new React \napplication, run unit tests as part of the CI process in GitLab, and output the test results and code coverage into the pipeline. This post continues where that project left off, so to follow along you can fork [this project](https://gitlab.com/guided-explorations/engineering-tutorials/react-unit-testing) or complete the guide in the linked post.\n\n## Configure your Amazon S3 bucket\n\nYou'll need to configure your Amazon S3 bucket so let's do that first.\n\n### Create your bucket\n\nAfter you log in to your AWS account, search for S3 using the search bar and select the S3 service. This will open the S3 service home page.\n\nRight away, you should see the option to create a bucket. The bucket is where you are going to store your built React application. Click the **Create bucket** button to continue.\n\n![Create S3 bucket](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/create_bucket.png){: .shadow}\n\nGive your bucket a name, select your region, leave the rest of the settings as default (we’ll come back to these later), and continue by \nclicking the **Create bucket** button. When naming your bucket, it’s important to remember that your bucket name must be unique and follow the \nbucket naming rules. 
I named mine `jw-gl-react`.\n\nAfter creating your bucket, you should be taken to a list of your buckets as shown below.\n\n![S3 bucket list](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/bucket_list.png){: .shadow}\n\n### Configure static website hosting\n\nThe next step is to configure static website hosting. Open your S3 bucket by clicking into the bucket name. Select the **Properties** tab and \nscroll to the bottom to find the static website hosting option.\n\n![static hosting button](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/static_hosting_1.png){: .shadow}\n\nClick **Edit** and then enable static website hosting. For the **Index** and **Error** document, enter `index.html` and then click **Save changes**.\n\n![edit static hosting](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/static_hosting_2.png){: .shadow}\n\n### Set up permissions\n\nNow that you have enabled static website hosting, you need to update your permissions so the public can visit your website. Return to your bucket and select the **Permissions** tab.\n\nUnder **Block public access (bucket settings)**, click **Edit** and uncheck **Block all public access** and continue to **Save changes**.\n\n![block public access](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/block_access_1.png){: .shadow}\n\nYour page should now look this this:\n\n![saved blocked public access](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/block_access_2.png){: .shadow}\n\nNow, you need to edit the Bucket Policy. Click the **Edit** button in the **Bucket Policy** section. 
Paste the following code into your new policy:\n\n```javascript\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Sid\": \"PublicReadGetObject\",\n            \"Effect\": \"Allow\",\n            \"Principal\": \"*\",\n            \"Action\": \"s3:GetObject\",\n            \"Resource\": \"arn:aws:s3:::jw-gl-react/*\"\n        }\n    ]\n}\n```\n\nReplace `jw-gl-react` on the resource property with the name of your bucket and **Save changes**.\n\nYour bucket should now look like this:\n\n![publicly accessible bucket](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/block_access_3.png){: .shadow}\n\n## Manually upload your React application\n\nNow, let’s build your React application and manually publish it to your S3 bucket. \n\nTo build the application, make sure your project is cloned to your local machine and run the following command in your terminal inside of your \nrepository directory:\n\n```\nnpm run build\n```\n\nThis will create a build folder inside of your repository directory.\n\nInside of your bucket, click the **Upload** button.\n\n![manual bucket upload](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/upload_1.png){: .shadow}\n\nDrag the contents of your newly created build folder (not the folder itself) into the upload area. This will \nupload the contents of your application into your S3 bucket. 
Make sure to click **Upload** at the bottom of the page to start the upload.\n\nNow return to your bucket **Properties** tab and scroll to the bottom to find the URL of your static website.\n\n![static website url](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/upload_2.png){: .shadow}\n\nClick the link and you should see your built React application open in your browser.\n\n![deployed app](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/manual_deploy.png){: .shadow}\n\n## Set up OpenID Connect in AWS\n\nTo deploy to your S3 Bucket from GitLab, we’re going to use a GitLab CI/CD job to receive temporary credentials \nfrom AWS without needing to store secrets. To do this, we’re going to configure OIDC for ID federation \nbetween GitLab and AWS. We’ll be following the [related GitLab documentation](https://docs.gitlab.com/ee/ci/cloud_services/aws/).\n\n### Add the identity provider\n\nThe first step is going to be adding GitLab as an identity and access management (IAM) OIDC provider in AWS. AWS has instructions located [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html), \nbut I will walk through it step by step.\n\nOpen the IAM console inside of AWS.\n\n![iam search](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/iam_1.png){: .shadow}\n\nOn the left navigation pane, under **Access management** choose **Identity providers** and then choose **Add provider**. \nFor provider type, select **OpenID Connect**.\n\nFor **Provider URL**, enter the address of your GitLab instance, such as `https://gitlab.com` or `https://gitlab.example.com`.\n\nFor **Audience**, enter something that is generic and specific to your application. In my case, I'm going to \nenter `react_s3_gl`. To prevent confused deputy attacks, it's best to make this something that is not easy to guess. 
Take a note of \nthis value, you will use it to set the `ID_TOKEN` in your `.gitlab-ci.yml` file.\n\nAfter entering the **Provider URL**, click **Get thumbprint** to verify the server certificate of your IdP. After this, go \nahead and choose **Add provider** to finish up.\n\n### Create the permissions policy\n\nAfter you create the identity provider, you need to create a permissions policy.\n\nFrom the IAM dashboard, under **Access management** select **Policies** and then **Create policy**. \nSelect the JSON tab and paste the following policy replacing `jw-gl-react` on the resource line with your bucket name.\n\n```javascript\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\"s3:ListBucket\"],\n      \"Resource\": [\"arn:aws:s3:::jw-gl-react\"]\n    },\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:PutObject\",\n        \"s3:GetObject\",\n        \"s3:DeleteObject\"\n      ],\n      \"Resource\": [\"arn:aws:s3:::jw-gl-react/*\"]\n    }\n  ]\n}\n```\n\nSelect the **Next: Tags** button, add any tags you want, and then select the **Next: Review** button. \nEnter a name for your policy and finish up by creating the policy. \n\n### Configure the role\n\nNow it’s time to add the role. From the IAM dashboard, under **Access management** select **Roles** \nand then select **Create role**. Select **Web identity**.\n\nIn the **Web identity** section, select the identity provider you created earlier. For the \n**Audience**, select the audience you created earlier. Select the **Next** button to continue.\n\nIf you wanted to limit authorization to a specific group, project, branch, or tag, you could create a **Custom trust policy** \ninstead of a **Web identity**. Since I will be deleting these resources after the tutorial, I'm going to keep it simple. 
For a \nfull list of supported filterting types, see the [GitLab documentation](https://docs.gitlab.com/ee/ci/cloud_services/index.html#configure-a-conditional-role-with-oidc-claims).\n\n![web identity](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/iam_2.png){: .shadow}\n\nDuring the **Add permissions** step, select the policy you created and select **Next** to continue. Give your role a name and click **Create role**.\n\nOpen the Role you just created. In the summary section, find the Amazon Resource Name (ARN) and save it somewhere secure. You will use this in your pipeline.\n\n![role](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/iam_3.png){: .shadow}\n\n## Deploy to your Amazon S3 bucket using a GitLab CI/CD pipeline\n\nInside of your project, create two [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/#define-a-cicd-variable-in-the-ui). The first variable should be named `ROLE_ARN`. For the value, paste the ARN of the \nrole you just created. The second variable should be named `S3_BUCKET`. 
For the value, paste the name of the S3 bucket you created \nearlier in this post.\n\nI have chosen to mask my variables for an extra layer of security.\n\n### Retrieve your temporary credentials\n\nInside of your `.gitlab-ci.yml` file, paste the following code:\n\n```\n.assume_role: &assume_role\n    - >\n      STS=($(aws sts assume-role-with-web-identity\n      --role-arn ${ROLE_ARN}\n      --role-session-name \"GitLabRunner-${CI_PROJECT_ID}-${CI_PIPELINE_ID}\"\n      --web-identity-token $ID_TOKEN\n      --duration-seconds 3600\n      --query 'Credentials.[AccessKeyId,SecretAccessKey,SessionToken]'\n      --output text))\n    - export AWS_ACCESS_KEY_ID=\"${STS[0]}\"\n    - export AWS_SECRET_ACCESS_KEY=\"${STS[1]}\"\n    - export AWS_SESSION_TOKEN=\"${STS[2]}\"\n```\n\nThis is going to use the the AWS Security Token Service to generate temporary (_3,600 seconds_) credentials utilizing the OIDC role you created earlier.\n\n### Create the deploy job\n\nNow, let's add a build and deploy job to build your application and deploy it to your S3 bucket.\n\nFirst, update the stages in your `.gitlab-ci.yml` file to include a `build` and `deploy` stage as shown below:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n```\n\nNext, let's add a job to build your application. Paste the following code in your `.gitlab-ci.yml` file:\n\n```\nbuild artifact:\n  stage: build\n  image: node:latest\n  before_script:\n    - npm install\n  script:\n    - npm run build\n  artifacts:\n    paths:\n      - build/\n    when: always\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == \"main\"'\n      when: always\n```\n\nThis is going to run `npm run build` if the change occurs on the `main` branch and upload the build directory as an \nartifact to be used during the next step.\n\nNext, let's add a job to actually deploy to your S3 bucket. 
Paste the following code in your `.gitlab-ci.yml` file:\n\n```\ndeploy s3:\n  stage: deploy\n  image:\n    name: amazon/aws-cli:latest\n    entrypoint: \n      - '/usr/bin/env'\n  id_tokens:\n      ID_TOKEN:\n        aud: react_s3_gl\n  script:\n    - *assume_role\n    - aws s3 sync build/ s3://$S3_BUCKET\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == \"main\"'\n      when: always\n```\n\nThis uses [YAML anchors](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#yaml-anchors-for-scripts) to run the `assume_role` script, \nand then uses the `aws cli` to upload your build artifact to the bucket you defined as a variable. This job also only runs if the change occurs \non the `main` branch.\n\nMake sure the `aud` value matches the value you entered for your audience when you setup the identity provider. In my case, I entered `react-s3_gl`.\n\nYour complete `.gitlab-ci.yml` file should look like this:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n\n.assume_role: &assume_role\n    - >\n      STS=($(aws sts assume-role-with-web-identity\n      --role-arn ${ROLE_ARN}\n      --role-session-name \"GitLabRunner-${CI_PROJECT_ID}-${CI_PIPELINE_ID}\"\n      --web-identity-token $ID_TOKEN\n      --duration-seconds 3600\n      --query 'Credentials.[AccessKeyId,SecretAccessKey,SessionToken]'\n      --output text))\n    - export AWS_ACCESS_KEY_ID=\"${STS[0]}\"\n    - export AWS_SECRET_ACCESS_KEY=\"${STS[1]}\"\n    - export AWS_SESSION_TOKEN=\"${STS[2]}\"\n  \nunit test:\n  image: node:latest\n  stage: test\n  before_script:\n    - npm install\n  script:\n    - npm run test:ci\n  coverage: /All files[^|]*\\|[^|]*\\s+([\\d\\.]+)/\n  artifacts:\n    paths:\n      - coverage/\n    when: always\n    reports:\n      junit:\n        - junit.xml\n\nbuild artifact:\n  stage: build\n  image: node:latest\n  before_script:\n    - npm install\n  script:\n    - npm run build\n  artifacts:\n    paths:\n      - build/\n    when: always\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == 
\"main\"'\n      when: always\n\n\ndeploy s3:\n  stage: deploy\n  image:\n    name: amazon/aws-cli:latest\n    entrypoint: \n      - '/usr/bin/env'\n  id_tokens:\n      ID_TOKEN:\n        aud: react_s3_gl\n  script:\n    - *assume_role\n    - aws s3 sync build/ s3://$S3_BUCKET\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == \"main\"'\n      when: always\n```\n\n### Make a change and test your pipeline\n\nTo test your pipeline, inside of `App.js`, change this line `Edit \u003Ccode>src/App.js\u003C/code> and save to reload.` to \n`This was deployed from GitLab!` and commit your changes to the `main` branch. The pipeline should kick off and when \nit finishes successfully you should see your updated application at the URL of your static website.\n\n![updated app](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/auto_deploy.png){: .shadow}\n\nYou now have a CI/CD pipeline built in GitLab that receives temporary credentials from AWS using OIDC and \nautomatically deploys to your Amazon S3 bucket. 
To take it a step further, you can [secure your application](https://docs.gitlab.com/ee/user/application_security/secure_your_application.html) \nwith GitLab's built-in security tools.\n\nAll code for this project can be found [here](https://gitlab.com/guided-explorations/engineering-tutorials/react-s3).\n\nCover image by [Lucas van Oor](https://unsplash.com/@switch_dtp_fotografie?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/bucket?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n## Related posts and documentation\n- [How to automate testing for a React application with GitLab](/blog/how-to-automate-testing-for-a-react-application-with-gitlab/)\n- [How to deploy AWS with GitLab](/blog/deploy-aws/)\n- [Deploy to AWS from GitLab CI/CD](https://docs.gitlab.com/ee/ci/cloud_deployment/)\n- [Configure OpenID Connect in AWS to retrieve temporary credentials](https://docs.gitlab.com/ee/ci/cloud_services/aws/)\n- [Secure GitLab CI/CD workflows using OIDC JWT on a DevSecOps platform](https://about.gitlab.com/blog/oidc/)\n",[9,109],{"slug":5043,"featured":6,"template":686},"how-to-deploy-react-to-amazon-s3","content:en-us:blog:how-to-deploy-react-to-amazon-s3.yml","How To Deploy React To Amazon S3","en-us/blog/how-to-deploy-react-to-amazon-s3.yml","en-us/blog/how-to-deploy-react-to-amazon-s3",{"_path":5049,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5050,"content":5056,"config":5061,"_id":5063,"_type":14,"title":5064,"_source":16,"_file":5065,"_stem":5066,"_extension":19},"/en-us/blog/how-to-easily-launch-gitlab-through-cloud-marketplaces",{"title":5051,"description":5052,"ogTitle":5051,"ogDescription":5052,"noIndex":6,"ogImage":5053,"ogUrl":5054,"ogSiteName":670,"ogType":671,"canonicalUrls":5054,"schema":5055},"How to easily launch GitLab through cloud marketplaces","Bitnami makes publishing GitLab into Azure Marketplace 
simple.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670001/Blog/Hero%20Images/bitnami-gitlab-cloud.png","https://about.gitlab.com/blog/how-to-easily-launch-gitlab-through-cloud-marketplaces","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to easily launch GitLab through cloud marketplaces\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Miranda Carter\"},{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2020-09-30\",\n      }",{"title":5051,"description":5052,"authors":5057,"heroImage":5053,"date":2802,"body":5059,"category":726,"tags":5060},[5058,3410],"Miranda Carter","\n\nToday almost every enterprise in the world moved at least some of its mission-critical workloads into public cloud environments, making it increasingly important that customers can easily deploy and manage their software in any cloud. All of the major cloud vendors have introduced marketplaces where customers can quickly deploy applications into their cloud computing infrastructure.\n\n[Bitnami](https://bitnami.com/), now part of VMware, has long partnered with the leading cloud vendors to provide a library of open source software in their marketplaces that is always up-to-date, packaged using best practices, and completely free to end users. Bitnami and GitLab worked together for years on publishing [GitLab Community Edition (CE)](/install/?version=ce) as part of this library.\n\n### The Bitnami and GitLab partnership advantage\n\nGitLab CE provides value to millions of organizations and community contributors, and this has only been enhanced by our partnership with Bitnami. 
By taking the GitLab CE open [source code](/solutions/source-code-management/) and packaging it in a way that is always up-to-date and easy to use out-of-the-box on almost any cloud platform, Bitnami has helped make GitLab CE accessible to hundreds of thousands of users.\n\nThe GitLab team is working with Bitnami to eliminate the complexity of packaging our enterprise application for multiple cloud marketplaces, in the same way they do for GitLab CE. This partnership enables the various marketplaces to receive timely updates of the GitLab Enterprise Edition (EE) software packages whenever there is a security issue or dependency update.\n\n### GitLab Enterprise Edition packaged by Bitnami is available on Microsoft Azure marketplace\n\nToday, we are pleased to announce that our partnership with Bitnami has helped make [GitLab EE](/install/) available in the [Microsoft Azure marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/gitlabinc1586447921813.gitlabee?tab=Overview). GitLab EE customers will be able to seamlessly deploy and use the application in these environments thanks to Bitnami’s expertise in packaging and publishing software for the public cloud.\n\nExisting customers can bring their licenses and apply them to GitLab EE in any of these environments. GitLab EE is also published by Bitnami in the VMware Cloud marketplace.\n\n### Software support for marketplace packages\n\nCustomers who deploy GitLab EE packaged by Bitnami will enjoy the same enterprise-level support that GitLab customers receive in any other supported environment. Customers who have deployed GitLab software into the cloud infrastructure already through the cloud marketplace must follow the normal GitLab software upgrade process to address any critical issues and vulnerabilities.\n\n### About the authors\n\n_[Vick Kelkar](/company/team/#vkelkar) is on Alliances team at GitLab. 
He has experience developing and running products for container orchestrators like Cloud Foundry and Kubernetes._\n\n_Miranda Carter has been part of the Bitnami team for over six years, and came to VMware as part of the VMware acquisition last year. Miranda is now a Program Manager at VMware and focuses on supporting Tanzu Application Catalog and supporting ISVs whenever possible._\n",[855,109,9,231],{"slug":5062,"featured":6,"template":686},"how-to-easily-launch-gitlab-through-cloud-marketplaces","content:en-us:blog:how-to-easily-launch-gitlab-through-cloud-marketplaces.yml","How To Easily Launch Gitlab Through Cloud Marketplaces","en-us/blog/how-to-easily-launch-gitlab-through-cloud-marketplaces.yml","en-us/blog/how-to-easily-launch-gitlab-through-cloud-marketplaces",{"_path":5068,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5069,"content":5075,"config":5081,"_id":5083,"_type":14,"title":5084,"_source":16,"_file":5085,"_stem":5086,"_extension":19},"/en-us/blog/how-to-get-gitops-right-with-iac-security",{"title":5070,"description":5071,"ogTitle":5070,"ogDescription":5071,"noIndex":6,"ogImage":5072,"ogUrl":5073,"ogSiteName":670,"ogType":671,"canonicalUrls":5073,"schema":5074},"How to get GitOps right with infrastructure as code security","Learn how the GitLab and Indeni integration makes security a core component of your GitOps workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663403/Blog/Hero%20Images/gitops-partner-cover-image.jpg","https://about.gitlab.com/blog/how-to-get-gitops-right-with-iac-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get GitOps right with infrastructure as code security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ulrica de Fort-Menares\"}],\n        \"datePublished\": \"2021-06-10\",\n      
}",{"title":5070,"description":5071,"authors":5076,"heroImage":5072,"date":5078,"body":5079,"category":791,"tags":5080},[5077],"Ulrica de Fort-Menares","2021-06-10","\nIn today's competitive digital era, it is imperative for organizations to undergo a digital transformation to effectively compete. For many, achieving a digital transformation means transitioning toward a DevOps model.\n\nDevOps has been around for many years, and the development side of the house has benefitted from the core practices of DevOps. However, the infrastructure side of the house has been lagging behind, particularly when it comes to speed. With [infrastructure as code (IaC)](/topics/gitops/infrastructure-as-code/) and [GitOps](/topics/gitops/), infrastructure teams have been able to apply the same disciplines and quality gates that are used to manage application code to the infrastructure - to deliver products faster, with more predictability and at scale.\n\n## Security slowing down delivery\n\nWhile the GitOps concept promises faster and more frequent deployment, the last thing you want is to be slowed down by your legacy security programs. How often has your release stopped near the end of process because it failed the security gate? All too often security testing is tacked on at the end of delivery. Developers inevitably spend significant time and energy investigating these security issues, which delays the release. Uncovering issues late in the cycle is expensive and painful to fix, not to mention creating unnecessary stress.\n\nThe software development process has been shifting left to deliver better-quality software faster. By using IaC, you can adopt the same DevOps principle for the infrastructure. 
Learning from the development world, you should integrate security controls into the development lifecycle early and everywhere.\n\n## How to shift your IaC security checks left\n\nThe core of the partnership between Indeni and GitLab is about making security a key part of the GitOps practice. The [Indeni Cloudrail](https://indeni.com/cloudrail/) and GitLab CI/CD integration brings IaC security into the tools that developers are familiar with and want to use.\n\n![GitOps workflow](https://about.gitlab.com/images/blogimages/secure-gitops-workflow.png){: .shadow}\nHow GitLab CI/CD fits into the Indeni Cloudrail DevOps workflow.\n{: .note.text-center}\n\nThe joint solution modernizes security programs with the shift-left approach and automates infrastructure compliance. Developers no longer need to get in line for security reviews. Instead, IaC will be automatically evaluated for security impacts. Security controls are integrated into the development lifecycle before deployment.\n\n![GitOps workflow](https://about.gitlab.com/images/blogimages/secure-gitops1.jpg){: .shadow}\nCatching IaC security violations in GitLab CI/CD.\n{: .note.text-center}\n\nAs shown in the example above, Indeni Cloudrail provides feedback in GitLab CI. This way, security risks relating to the infrastructure can be instantly remediated when they are made so developers can move fast. You can think of the shift security left approach as testing IaC continuously and preventing insecure infrastructure from being deployed.\n\n## Don't let those noisy security tools impede your GitOps practice\n\nSecurity tools are notorious for being noisy with their many false positives. According to the Advanced Technology Academic Research Center [(ATARC) Federal DevSecOps Landscape survey](https://atarc.org/project/devsecops-survey/), too many false positives is the number one frustration with security testing. 
A noisy security tool can be counterproductive by inadvertently stopping the pipeline frustrating your developers.\n\nWhat makes Indeni Cloudrail unique is its context-based analysis, which refers to its ability to understand the relationships among cloud resources, making in-depth security analyses possible. Cloudrail also factors in already existing resources in the cloud environment to gain a holistic view as part of its analysis. The end result is three times less noise than any comparable IaC security tools in the market. In essence, Cloudrail will only bother developers with problems that truly matter to the organization. Learn more about [what makes Cloudrail unique in this blog post](https://indeni.com/blog/comparing-cloudrail-checkov-tfsec-and-kics-with-testing/).\n\n## Why GitLab and Indeni are better together\n\nBy delivering a developer-centric security tool for IaC, security has a better chance of gaining acceptance in the developer community. Together, Indeni and GitLab equip developers with the right tools to support a GitOps model and help organizations with their digital transformation.\n\n## Watch the demo\n\nWatch the Cloudrail demo to see the GitOps workflow for IaC security.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/9WSd0D87Vxc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### About Indeni\n\n_[Indeni](https://indeni.com/) automates best practices for network security and cloud security. Its security infrastructure platform automates health and compliance checks for leading firewalls to maximize uptime and efficiency. 
Its Infrastructure-as-Code security analysis tool, Cloudrail, automates infrastructure compliance to prevent insecure cloud environments from being deployed._\n\nCover image by [Dimitry Anikin](https://unsplash.com/@anikinearthwalker) on [Unsplash](https://unsplash.com/photos/DsmjpJzm2i0)\n",[534,875,9,231],{"slug":5082,"featured":6,"template":686},"how-to-get-gitops-right-with-iac-security","content:en-us:blog:how-to-get-gitops-right-with-iac-security.yml","How To Get Gitops Right With Iac Security","en-us/blog/how-to-get-gitops-right-with-iac-security.yml","en-us/blog/how-to-get-gitops-right-with-iac-security",{"_path":5088,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5089,"content":5095,"config":5100,"_id":5102,"_type":14,"title":5103,"_source":16,"_file":5104,"_stem":5105,"_extension":19},"/en-us/blog/how-to-include-file-references-in-your-ci-cd-components",{"title":5090,"description":5091,"ogTitle":5090,"ogDescription":5091,"noIndex":6,"ogImage":5092,"ogUrl":5093,"ogSiteName":670,"ogType":671,"canonicalUrls":5093,"schema":5094},"How to include file references in your CI/CD components","Learn how to include scripts and dependencies in your CI/CD components to minimize duplications and simplify maintenance. 
This tutorial takes you step-by-step through the process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664595/Blog/Hero%20Images/blog-image-template-1800x945__9_.png","https://about.gitlab.com/blog/how-to-include-file-references-in-your-ci-cd-components","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to include file references in your CI/CD components\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2024-10-16\",\n      }",{"title":5090,"description":5091,"authors":5096,"heroImage":5092,"date":5097,"body":5098,"category":1180,"tags":5099},[2120],"2024-10-16","I’m frequently asked whether included CI/CD components can reference additional files stored outside of the pipeline repository. While including components in your configuration is straightforward since they’re just YAML, many users want to know if those included components can access and execute additional files referenced by the components, like shell scripts or other dependencies. \n\nThis challenge has been a common topic of discussion in threads across the [GitLab Forum](https://forum.gitlab.com/t/gitlab-ci-includes-a-file-from-another-project-that-executes-a-script-file/111698) and [Reddit](https://www.reddit.com/r/gitlab/comments/18ma13x/gitlab_components_question/).\n\nNow for the good news: CI/CD components not only allow you to reuse pipeline configurations, saving time and effort, but you can also go a step further. With the new [CI/CD Steps](https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation/), you can directly reuse centralized automation scripts and dependencies in your pipelines. 
You'll gain even greater flexibility, making your pipelines more powerful and adaptable than ever.\n\nBy storing your scripts in a central location and wrapping them in CI/CD Steps, you can easily call these steps from your CI/CD components. This eliminates the need to duplicate scripts across multiple repositories and CI/CD configurations, streamlining your workflow and reducing redundancy.\n\nBefore we dive into the step-by-step guide, let’s briefly explore what CI/CD components and CI/CD Steps are.\n\n## What are CI/CD components?\n\n[CI/CD components](https://docs.gitlab.com/ee/ci/components/) are reusable units of pipeline configurations that get included in a pipeline when it’s created. The components bring additional jobs into the pipeline, however they can’t bring additional files as such reusable scripts. \n\n## What are CI/CD Steps?\n\n[CI/CD Steps](https://docs.gitlab.com/ee/ci/steps/) are reusable units of a job. Each step defines structured inputs and outputs that can be consumed by other steps. Steps can come from local files, GitLab.com repositories, or any other Git source. Steps offer a structured alternative to shell scripts for running jobs. They are modular, can be composed, tested, and easily reused, providing greater flexibility and maintainability.\n\n## What are the differences between CI/CD Steps and CI/CD components?\n\n- Component and step definitions look very similar but they take effect at different phases in pipeline execution. \n\n- Components are used when a pipeline is created while steps are used when individual jobs are running. \n\n- When a step is running, the whole repository is being downloaded into the job environment along with extra files. 
\n\n## A step-by-step guide\n\nHere is how CI/CD Steps and Components work together to access additional files.\n\n![CI/CD Steps flow diagram](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675829/Blog/Content%20Images/steps-diagram-for-blog.png)\n\nThis diagram illustrates the process flow: Jobs defined within components are imported into the pipeline configuration (`.gitlab-ci.yml`) when the pipeline is created. During the pipeline's execution, a job’s steps are executed, and the entire Git repository is downloaded to the [Step runner](https://docs.gitlab.com/ee/ci/steps/#using-steps) within the job’s context. This ensures that references to dependencies function correctly.\n\n**1\\. Define a component with `run` keyword that runs CI/CD Steps**\n\nRun is a new keyword that supports running steps, see the example code below. You can use [this guide](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/) to learn more on how to create Components. \n\n![template-yml](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675829/Blog/Content%20Images/Screenshot_2024-10-13_at_8.22.00.png)\n\n**2\\. Create a `step.yml` file in the project where your scripts and dependencies are located.**\n\nIn this code example, format.sh exists in the same directory as the `step.yml`. \n\n![step.yml](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675829/Blog/Content%20Images/Screenshot_2024-10-13_at_8.23.52.png)\n\n While the job is running, the Step runner will download the entire Git repository where the step is defined. The `${{ step_dir }}` step expression references the directory of the locally cached step files, allowing you to access other files from the repository. In the example above, the “format” step invokes the format.sh script.\n\n**3\\. Make sure that any files accessed by the step are located in the same repository as the `step.yml` file.**\n\n**4\\. 
Include the component in your CI/CD configuration.**\n\nSee this example code:\n\n![.gitlab-ci.yml](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675829/Blog/Content%20Images/Screenshot_2024-10-13_at_8.26.22.png)\n\nCode example: You can find the entire code demonstrated in this blog in this [GitLab Group](https://gitlab.com/gitlab-da/use-cases/ci-steps). \n\n**Important note:** The CI/CD Steps feature is currently [Experimental](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#experiment), and the syntax may change as we continue to iterate and refine it based on user feedback. Any feedback should be provided via [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/493694).\n\n## Learn more\n\n- Watch [this walkthrough](https://youtu.be/qxTbeYXEQLM) by [Joe Burnett](https://about.gitlab.com/company/team/#josephburnett), principal engineer at GitLab, as he demonstrates the example discussed in the blog post.\n\n- [Introducing CI/CD Steps](https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation/)\n\n- [Introducing CI/CD components](https://about.gitlab.com/blog/introducing-ci-components/)",[109,9,916,1180],{"slug":5101,"featured":6,"template":686},"how-to-include-file-references-in-your-ci-cd-components","content:en-us:blog:how-to-include-file-references-in-your-ci-cd-components.yml","How To Include File References In Your Ci Cd 
Components","en-us/blog/how-to-include-file-references-in-your-ci-cd-components.yml","en-us/blog/how-to-include-file-references-in-your-ci-cd-components",{"_path":5107,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5108,"content":5114,"config":5119,"_id":5121,"_type":14,"title":5122,"_source":16,"_file":5123,"_stem":5124,"_extension":19},"/en-us/blog/how-to-keep-up-with-ci-cd-best-practices",{"title":5109,"description":5110,"ogTitle":5109,"ogDescription":5110,"noIndex":6,"ogImage":5111,"ogUrl":5112,"ogSiteName":670,"ogType":671,"canonicalUrls":5112,"schema":5113},"How to keep up with CI/CD best practices","In this post, we look at continuous integration/continuous delivery (CI/CD), how to implement some best practices, and why it is important.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749661856/Blog/Hero%20Images/ci-cd-demo.jpg","https://about.gitlab.com/blog/how-to-keep-up-with-ci-cd-best-practices","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to keep up with CI/CD best practices\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-02-03\",\n      }",{"title":5109,"description":5110,"authors":5115,"heroImage":5111,"date":5116,"body":5117,"category":679,"tags":5118},[851],"2022-02-03","\nContinuous integration and continuous delivery (CI/CD) are at the heart of any successful DevOps practice. Teams wanting to achieve modern software development must keep up with [CI/CD](/topics/ci-cd/) best practices. Here’s what you need to know to make sure your team is on the right track.\n\n## What is the meaning of CI/CD?\n\nIt’s a tech process, it’s a mindset, it’s a series of steps… CI/CD is all of those things. Put simply, CI enables DevOps teams to streamline code development using automation. 
CI simplifies software builds and source code integration, enables version control, and promotes greater collaboration via automation. Where CI leaves off, continuous delivery kicks in with automated testing and deployment. Not only does CD reduce the amount of “hands on” time ops pros need to spend on delivery and deployment, it also enables teams to [drastically reduce the number of tools](/resources/whitepaper-forrester-manage-your-toolchain/) required to manage the lifecycle.\n\n## What are the best practices for CICD?\n\nIf you want to be successful with CI/CD, make continuous integration, delivery, and deployment your mantra as they are the cornerstones of software development practices. The goal of DevOps is to get software to users more quickly than traditional methods, and these development practices will help make that happen.\n\nIf you ask 10 DevOps teams for their take on CI/CD best practices, granted, you'll likely get 10 different answers. However, there are several tips that are widely agreed upon:\n\n1. Only build once: Don't create a new build for each stage because you risk introducing inconsistencies. Instead, promote the same build artifacts throughout each stage of the CI/CD pipeline. This requires an environment-agnostic build.\n\n2. Streamline the tests: Strike a balance between test coverage and performance. If it takes too long for test results users will try to circumvent the process.\n\n3. Fail fast: On the CI side, devs committing code need to know as quickly as possible if there are issues so they can roll the code back and fix it while it’s fresh in their minds. The idea of “fail fast” helps reduce developer context switching too, which makes for happier DevOps professionals.\n\n4. Make it daily: The more regular the code commits, the more benefit DevOps teams will see.\n\n5. Fix it if it’s broken: CI/CD makes it simple to fix broken builds.\n\n6. 
Clean pre-production environments:The longer environments are kept running, the harder it becomes to track all the configuration changes and updates that have been applied. This is good incentive to clean up pre-production environments between each deployment. \n\n7. Automation all the time: Keep tweaking the CI/CD pipeline to ensure the “continuous automation” state is achieved.\n\n8. Know the steps: Make sure the release and rollback plans are well documented and understood by the entire team.\n\n9. Keep it safe: CI/CD is a shift left, so it offers a good opportunity to integrate security earlier in the process.\n\n10. It’s a loop: Make sure there’s an easy way for the entire team to receive (and contribute to) feedback.\n\n## Continuous delivery best practices\n\nContinuous delivery/deployment feels like it deserves it’s own deep dive into best practices because CI often steals most of the headlines. Here is a roundup of CD best practices:\n\n- Start where you are: Don’t wait for a new platform. It’s always possible to tweak what you have to make it faster and more efficient.\n\n- Less is more: The best CD is done with minimal tools.\n\n- Track what’s happening: Issues and merge requests can get out of hand. If milestones are an option, they can help. Bonus: Milestones do double-duty when setting up Agile sprints and releases.\n\n- Automatically deploy changes: Streamline user acceptance testing and staging with automation.\n\n- Manage the release pipeline: Automation is the answer.\n\n- Establish monitoring: Keeping a good eye on the production process saves time and money. It also can provide key data points to the business side.\n\n- Kick off continuous deployment: Once continuous delivery is humming, bring on the hands-free deployment where it’s possible to send changes to production automatically. \n\n## How to improve the CI/CD pipeline\n\nA pipeline is just another way of characterizing the series of steps involved in deploying a new version of software. 
Monitoring and automation are concepts introduced in a CI/CD pipeline to improve the app development process, especially during the integration and testing phases, as well as when software is delivered and deployed.\n\nThe typical elements of a CI/CD pipeline are: plan, analyze, design, build, test, release, deploy, validation and compliance and maintenance. These steps can be done manually, but the real value of a CI/CD pipeline comes when they are automated.\n\nIf it’s time to finetune the CI/CD pipeline, consider the following performance enhancements:\n\n- Mix up the release strategy. A [canary release](https://martinfowler.com/bliki/CanaryRelease.html) (sometimes called a canary deployment) might be worth considering. In a canary release, new features are deployed to just a select group of users.\n\n- Add more automated testing because there is [never enough automated testing](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/). \n\n- Continue to pare down. Fewer tools mean fewer handoffs and steps. If CI/CD is part of a [DevOps platform](/topics/devops-platform/), everything will be in one place. \n\n- Consider a routine practice of [software composition analysis](https://www.csoonline.com/article/3640808/software-composition-analysis-explained-and-how-it-identifies-open-source-software-risks.html) to ensure the DevOps team is keeping track of critical open source software issues. \n\n## How to measure the success of CI/CD \n\nDevOps teams can’t know how well their CI/CD practices are going unless they measure them. [Metrics](https://about.gitlab.com/topics/ci-cd/continuous-integration-metrics/) play an important role in improving system performance and helping to identify where value can be added. 
They also provide a baseline for measuring the impact of any improvements made.\n\n Here are the best metrics to employ:\n\n### Cycle time\nThis refers to how long it takes to roll out a functional application from the time work on the code begins. To figure out the average life cycle time, measure the development process phases. This metric will provide insight into what the overall development time is and any bottlenecks in the process.\n\n### Time to value\nThis refers to how long it takes to release written code. The integration, testing, delivery, and deployment should take anywhere from minutes up to a few hours for test cycles to finish. If it takes days to move a build through the CI/CD pipeline time to value is not being realized and the process should be fine-tuned.\n\n### Uptime\nUptime is a measure of stability and reliability and whether everything is working as it should. It is one of the biggest priorities the ops team has. When the CI/CD strategy is automated, ops leaders can focus more of their time on system stability and less time on workflow issues.\n\n### Error rates\nApplication error rates is a fact of life in the development process. Tracking them is very important because not only can error rates indicate quality problems, but also ongoing performance and uptime related issues. \nIf uptime and error rates seem high, it can illustrate a [common CI/CD challenge](https://about.gitlab.com/blog/modernize-your-ci-cd/) between dev and ops teams. Operations goals are a key indicator of process success.\n\n### Infrastructure costs\nInfrastructure costs are critically important with cloud native development. Deploying and managing a CI/CD platform can result in big expenses if they are not kept in check.\nTo determine how they will set their prices, cloud providers will consider what the cost is of network hardware, infrastructure maintenance, and labor. 
\n\n### Team retention\nIt’s no mystery: When a developer – or anyone, really – feels valued and satisfied they’re apt to stick around. When teams work well together and know how to collaborate, retention is likely to follow. On the flip side, developers might feel uncomfortable speaking up if they don’t like how things are going, but looking at retention rates can help identify potential problems.\n\n##  What are the benefits of following CI/CD best practices?\n\nWhen best practices are followed, the [benefits of CI/CD](https://about.gitlab.com/topics/ci-cd/benefits-continuous-integration/) are felt throughout an organization: From HR to operations, teams work better and achieve goals. Establishing metrics around CI/CD performance can go beyond providing insights on development and carry over to many aspects of the business. \n\nA well-functioning CI/CD pipeline can be a game changer for DevOps teams. Here are some of the biggest benefits:\n\n**Developers aren’t fixing things, they’re writing code.** Fewer tools and toolchains mean less time spent on maintenance and more time spent actually producing high-quality software applications.\n\n**Code is in production.** Rather than sitting in a queue, code actually makes it out into the real world. This also leads to happier developers.\n\n**Developers have the bandwidth to focus on solving business problems.** A streamlined CI/CD process lets developers actually focus on what matters and not on the distractions of problem code, missed handoffs, production issues, and more.\n\n**It’s easier to innovate.** It’s a competitive world, and organizations need all the tools at their disposal to stay ahead. A well-built CI/CD process makes software development easier, faster and safer, which means DevOps teams have the time and energy to think outside the box.\n\n**Attract and retain talent.** It’s a very competitive labor market and DevOps talent can be very hard to impress. 
Nothing says “we take our DevOps team seriously” more than an organization that’s invested in the technology and processes around CI/CD.\n\n**Everyone does what they do best.** Dev, ops, sec and test each have a critical role to play, and CI/CD helps [clearly delineate the responsibilities](/topics/devops/build-a-devops-team/).\n\n## CI/CD deployment strategy\n\nRemember that CI/CD is about getting a software application into the hands of a customer that is better and done quicker than before. Organizations that adopt CI/CD find their productivity improves significantly. The trick is coming up with a deployment strategy that works for the individual organization. \n\nHere are some strategies to help make a deployment successful:\n\n- Commit to frequency in CD\n- Automate the build process\n- Run tests in parallel, and create a deployment pipeline\n- Fail fast and adopt a shift left mentality to give developers the skills and tools to accelerate without breaking things \n- Use CI tools that provide faster feedback\n\n## How can I implement CI/CD in my organization?\n\nBefore any software is implemented, it’s key to determine what the business drivers are and the same goes for adopting CI/CD. All development stakeholders should be involved early on in the implementation process. Developers should provide input since they will be the main users of a product. \n\nMake sure to do your due diligence when researching software that enables CI/CD, and ask about free trials. \n\nWhile it may seem counterintuitive since CI/CD is about accelerating the pace of software delivery in an automated fashion, start the process with a mentality of slow and steady. The boost in efficiency will decline if bugs are steadily moving into the finished application. \n\nIt’s important to have consistency in the integration process. Perform unit tests, trigger releases manually and track metrics. 
Then determine what can and should be automated.\n",[976,977,9],{"slug":5120,"featured":6,"template":686},"how-to-keep-up-with-ci-cd-best-practices","content:en-us:blog:how-to-keep-up-with-ci-cd-best-practices.yml","How To Keep Up With Ci Cd Best Practices","en-us/blog/how-to-keep-up-with-ci-cd-best-practices.yml","en-us/blog/how-to-keep-up-with-ci-cd-best-practices",{"_path":5126,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5127,"content":5132,"config":5138,"_id":5140,"_type":14,"title":5141,"_source":16,"_file":5142,"_stem":5143,"_extension":19},"/en-us/blog/how-to-learn-ci-cd-fast",{"title":5128,"description":5129,"ogTitle":5128,"ogDescription":5129,"noIndex":6,"ogImage":1842,"ogUrl":5130,"ogSiteName":670,"ogType":671,"canonicalUrls":5130,"schema":5131},"How to learn CI/CD fast","Continuous integration and continuous delivery (CI/CD) are critical to faster software releases and it's less complicated than it seems to get rolling. Here's how to start fast with CI/CD.","https://about.gitlab.com/blog/how-to-learn-ci-cd-fast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to learn CI/CD fast\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Vanbuskirk\"}],\n        \"datePublished\": \"2022-04-13\",\n      }",{"title":5128,"description":5129,"authors":5133,"heroImage":1842,"date":5135,"body":5136,"category":769,"tags":5137},[5134],"Mike Vanbuskirk","2022-04-13","\nContinuous integration and continuous delivery (CI/CD) have become the keystone technical architecture of successful DevOps implementations. CI/CD has a reputation for being complex and hard to achieve, but that doesn’t have to be the case. Modern tools enable teams to get started with minimal configuration and infrastructure management. 
Here’s how you can “start fast” with CI/CD and get some quick, demonstrable performance wins for your DevOps team.\n\n## What does CI/CD mean?\n\n[CI/CD](/topics/ci-cd/) refers to a system or systems that enable software development to have continuous integration and continuous delivery capabilities. The architecture underpinning CI/CD is typically referred to as a pipeline, as software progresses through various stages akin to flowing through a pipe. What does [continuous integration and continuous delivery](/blog/basics-of-gitlab-ci-updated/) actually mean? Taking some time to explore the more granular details will help us set some goals for getting a fast start with CI/CD.\n\nStarting on the left side of the pipeline, continuous integration encompasses a variety of automation that occurs over the course of multiple stages, designed to test and provide quick feedback on different aspects of code quality, functionality, and security. CI testing can run the gamut from unit tests and linting run locally on a developer workstation, to full integration testing suites and static analysis. Anyone that's ever seen a small code change cause a significant outage or breakage upon reaching production knows the value of automated, repeatable testing, and the downsides of depending on manual testing.\n\nOnce a code change has passed testing, it's time to deploy. In legacy environments, system administrators and operations staff often had to manually transfer and install updates, and reboot servers to deploy new features. This type of manual work simply does not scale to the demands of the modern application ecosystem, and is error prone to boot. With continuous delivery, that code is automatically deployed to servers in a testable and deterministic way. Code [can be staged in environments](/blog/ci-deployment-and-environments/) with less strict SLAs, such as development, staging, and QA. Once it has been verified, the new features can be launched as production workloads. 
In some environments, \"continuous delivery\" becomes \"continuous deployment\", in which comprehensive testing automatically deploys new code through to production without human intervention.\n\nWhat's the ultimate goal of all this automation? It's what makes a successful software organization: faster deployment cadence.\n\n## Getting started with CI/CD\n\nWith a little background established, now it's time to focus on the key objective: getting up and running quickly. The primary goal here is to get a quick win with a CI/CD implementation to improve deployment velocity, and hopefully drive a larger effort towards standardizing on widespread and effective CI/CD usage.\n\nGetting started with CI/CD can appear daunting. There is a wealth of tools, services, and platforms available to provide specific functionality and end-to-end solutions for CI/CD. Some options like [Jenkins](https://www.jenkins.io) are self-managed; others, including GitLab, have a holistic CI/CD pipeline with integrated version control. \n\n## Build your pipeline\n\nRealistically, there is no magic bullet configuration for CI/CD. Each implementation will be highly dependent on a number of factors: the type of application being deployed, the size and skillset of the engineering team/s, the business requirements, and the scale of the application itself. The design and implementation considerations for an application that might see 100 users per day is vastly different from one that sees 1 million. The same holds true for CI/CD.\n\nBelow are 5 high-level strategies for tackling that first CI/CD pipeline:\n\n### 1. Start small\n\nDon't try to fix everything at once. Attempts to refactor an entire codebase or infrastructure will be a complex process, typically involving multiple layers of approval, discussion, planning, and possible pushback from dependent teams. It's much easier to choose a small subset of the application infrastructure to improve.\n\n### 2. 
Catch low-hanging fruit early\n\nSome of the simplest and easiest to detect (and fix) errors can end up causing the biggest problems if they make it into production workloads. However, it might not make sense to add unnecessary steps or complexity to the CI/CD pipeline. In this instance, it’s a good choice to configure some automatic testing to take place on developer machines before code is committed. Most Git DVCS providers, including GitLab, allow users to deploy pre-commit hooks. Pre-commit hooks are typically some type of script or automation that are triggered when specific actions occur. For example, when a developer initiates a new commit, a pre-commit hook might check that the code conforms to syntactical and structural standards, and is free from basic syntax errors. Other pre-commit hooks might ensure that unit tests are run successfully before a commit is allowed to proceed into the larger pipeline.\n\n### 3. Make security a part of CI/CD\n\nTests shouldn't just be limited to syntax and logic. Catching security issues early in the software development lifecycle (SDLC) means they are much easier, cheaper, and safer to fix. Adding some basic [static code analysis tools](/blog/rule-pack-synthesis/) and dependency checkers can vastly improve the security posture of an application by providing fast feedback and early detection of common security problems and potential vulnerabilities.\n\n### 4. Tailor tests to common issues\n\nMost engineering teams that rely on legacy deployment methodologies should be able to easily identify one or two common, recurring issues in deployments. Perhaps copying application code to servers via SCP always results in broken file permissions, or an [NGINX](https://www.nginx.com) frontend is never properly restarted. For the first iteration of [automated testing](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/), choose these specific issues to address with testing. 
This serves two purposes; it limits the scope of work and gives the team an achievable [\"definition of done,\"](https://www.leadingagile.com/2017/02/definition-of-done/) and it provides a highly visible success story by fixing the most problematic existing deployment problems. Once a working pipeline has been deployed and there is organizational buy-in, the testing suite can be expanded.\n\n### 5. Automate deployment to lower environments\n\nNew CI/CD implementations should [focus on continuous delivery](/blog/cd-solution-overview/), automatically deploying to a staging environment, and providing a manual decision interface for deploying to production. Continuous deployment is generally a step that should be taken further in the DevOps journey when there is more collective knowledge and technical maturity around automated deployments.\n\n## Get a fast start with CI/CD\n\nA good CI/CD implementation can measurably improve software deployment velocity and is a core pillar of a solid DevOps strategy. 
However, the first attempt at utilizing CI/CD should eschew heavy, complex deployments whenever possible, instead focusing on a \"batteries-included\" approach that provides teams with a short time-to-value cycle.\n\nOnce CI/CD provides that quick win, engineering teams can build on that momentum and buy-in to scale the solution across the entire organization, improving deployment speed and outcomes throughout.\n",[109,9,749],{"slug":5139,"featured":6,"template":686},"how-to-learn-ci-cd-fast","content:en-us:blog:how-to-learn-ci-cd-fast.yml","How To Learn Ci Cd Fast","en-us/blog/how-to-learn-ci-cd-fast.yml","en-us/blog/how-to-learn-ci-cd-fast",{"_path":5145,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5146,"content":5152,"config":5157,"_id":5159,"_type":14,"title":5160,"_source":16,"_file":5161,"_stem":5162,"_extension":19},"/en-us/blog/how-to-leverage-modern-software-testing-skills-in-devops",{"title":5147,"description":5148,"ogTitle":5147,"ogDescription":5148,"noIndex":6,"ogImage":5149,"ogUrl":5150,"ogSiteName":670,"ogType":671,"canonicalUrls":5150,"schema":5151},"How to leverage modern software testing skills in DevOps","Test automation is finally happening, but do teams have the necessary modern software testing skills? 
Here's what you need to know","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668307/Blog/Hero%20Images/test-automation-devops.jpg","https://about.gitlab.com/blog/how-to-leverage-modern-software-testing-skills-in-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to leverage modern software testing skills in DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lauren Minning\"}],\n        \"datePublished\": \"2022-07-05\",\n      }",{"title":5147,"description":5148,"authors":5153,"heroImage":5149,"date":5154,"body":5155,"category":679,"tags":5156},[892],"2022-07-05","\nTesting is a critical step in the software development lifecycle but also the part of the process most DevOps teams trip over. The solution — test automation — has been talked about for years but has been far easier said than done. However, with new technologies on the rise, test automation is taking off. DevOps teams need to be prepared with modern software testing skills. Here's how to get started.\n\n## The benefits of automated software testing\n\nIn [GitLab's 2021 Global DevSecOps Survey](/developer-survey/) of over 4,000 developers, security professionals, and operations team members, respondents agreed on one universal truth: Software testing is the biggest reason why development is delayed. \n\nIt’s critical to get software testing right because it’s financially disastrous to get it wrong. How much money do software mistakes add up to? Somewhere in the trillions. Yes, with a “t.”\n\n[DevOps.com](https://devops.com/this-is-not-just-a-test-devops-and-the-need-to-automate/) reported that software failures in companies’ operations systems cost a total of almost $1.6 trillion in the U.S. in 2019 alone. \n\nBut testing has traditionally been difficult to do efficiently and not particularly popular with developers. The solution? 
Test automation combined with modern software testing skills.\n\n## It’s a hands-on start\n\nDevOps teams looking to up their test game need to take a step back... into _manual_ testing.\n\n(The irony is not lost on us.)\n\nA manual testing mindset can actually improve all facets of automated software testing. As devs perform basic tests on their code as it’s being written, channeling their inner manual tester can be helpful. Whether it’s looking at the requirements again or running failed fixes *one more time*, that attention to detail should be brought into how automated test cases are built and executed. \n\n## Take the modern view\n\nOnce developers have incorporated some old-school habits into their test cases, it’s time to consider some fresh perspectives, up to and including a deep understanding of the organization’s goals and objectives.\n\nAccording to [Modern Testing](https://www.moderntesting.org), there are key principles of modern testing that every developer needs to be aware of for successful testing at any stage:\n\n- Job one is to make the business better. \n- Rely on trusted resources like [Lean Thinking](https://www.lean.org/explore-lean/what-is-lean/) and the [Theory of Constraints](https://www.leanproduction.com/theory-of-constraints/#:~:text=The%20Theory%20of%20Constraints%20is,referred%20to%20as%20a%20bottleneck).\n- Fail fast but focus on success.\n- Always be the customer when testing.\n- Do data-driven work. \n- Testers are evangelists. \n\n## Get certified\n\nAs the saying goes, every little bit helps. Though it is not required, a training program or certification course in software testing can enhance team capabilities.\n\nIf there's interest in this option, research courses online that might fit. From beginners to experienced testers, there’s something for everyone.\n\nNot sure where to start? 
Teams can explore the International Software Testing Qualifications Board (ISTQB) [Foundation Level Certification for CTFL certification](https://astqb.org/certifications/foundation-level-certification/). This is required before taking any other certifications (see [the full list of ISTQB prerequisites](https://astqb.org/certifications/#prerequisites)). After CTFL, there are many interesting certification options. \n\nThe [American Software Qualifications Board](https://astqb.org/certifications/), which offers the ISTQB certifications, is another great resource and has a helpful [Software Testing Career Road Map](https://astqb.org/benefits/road-map/). \n\n## Embrace new technologies\n\n[Artificial intelligence and machine learning](/blog/ai-in-software-development/) are at the core of test automation, so a thorough understanding of the technologies is a key modern software testing skill to have onboard. If AI/ML is already in use, ask to shadow or “apprentice” those working with it. Organize a Q&A for the DevOps team with an expert, and pull together a suggested reading list. The more understanding and experience, the easier it will be to get the most out of an ML bot.\n\n## Dive into the metrics\n\nAutomation is not only going to lead to faster releases, it's going to make it possible to do even more testing, which is great but, of course, also means there will be even _more_ data than ever before. It can be easy to feel overwhelmed by it all, so it's critical DevOps teams decide and [focus on the metrics that matter most](/blog/gitlab-top-devops-tooling-metrics-and-targets/) to the organization. It could be pipeline stability, time to first failure, or the \"age\" of open bugs... 
but whatever they are, they're important to continue to measure and understand.\n\n## The bottom line on modern software testing skills\n\nTesters, who’ve often been overlooked when it comes to DevOps fame and glory, have an opportunity to reinvent themselves and their QA roles if they can take advantage of modern software testing skills. It’s a critical step in the process that is finally getting some much needed attention and tech investment, so it makes sense to take it seriously.\n",[9,1158,875],{"slug":5158,"featured":6,"template":686},"how-to-leverage-modern-software-testing-skills-in-devops","content:en-us:blog:how-to-leverage-modern-software-testing-skills-in-devops.yml","How To Leverage Modern Software Testing Skills In Devops","en-us/blog/how-to-leverage-modern-software-testing-skills-in-devops.yml","en-us/blog/how-to-leverage-modern-software-testing-skills-in-devops",{"_path":5164,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5165,"content":5170,"config":5175,"_id":5177,"_type":14,"title":5178,"_source":16,"_file":5179,"_stem":5180,"_extension":19},"/en-us/blog/how-to-make-your-devops-team-elite-performers",{"title":5166,"description":5167,"ogTitle":5166,"ogDescription":5167,"noIndex":6,"ogImage":3224,"ogUrl":5168,"ogSiteName":670,"ogType":671,"canonicalUrls":5168,"schema":5169},"How to make your DevOps team elite performers","Every company wants DevOps done better. 
The DORA Report spotlights what it takes to be a DevOps elite, and what teams need to do to get there.","https://about.gitlab.com/blog/how-to-make-your-devops-team-elite-performers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make your DevOps team elite performers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2021-10-26\",\n      }",{"title":5166,"description":5167,"authors":5171,"heroImage":3224,"date":5172,"body":5173,"category":769,"tags":5174},[810],"2021-10-26","\n\nSo your company has a DevOps team –  great! – but are they elite performers or low performers?\n\nThere’s a chasm of difference between the two, according to the [State of DevOps 2021 report](https://gitlab.com/gitlab-com/www-gitlab-com/uploads/069ee8e2ee6af463cf0aafcd89eda33e/state-of-devops-2021.pdf) from DORA, the DevOps Research and Assessment team at Google. It’s the tipping point in how resilient, efficient and reliable your team is, and that’s directly tied to your ability to help your business be more competitive. (To be transparent, GitLab was one of the many sponsors of the report, and we’ve incorporated some of the DORA metrics [within our DevOps Platform](https://gitlab-com.gitlab.io/cs-tools/gitlab-cs-tools/what-is-new-since/?tab=features&s[…]tegories=DevOps+Reports&textSearch=DevOps&minVersion=13_08) so you can compare your highest and lowest-performing teams and see how much of the DevOps lifecycle each one is embracing.)\n\nBragging rights aside, a personal -- and not insignificant -- benefit of being on an elite DevOps team is that your [company value](/blog/a-look-at-devops-salaries/), as well as your [salary](/blog/four-tips-to-increase-your-devops-salary/), would likely rise, as would your ability to be hired at a top-tier company. \n\nSo what does it mean to be an elite DevOps team and what does it take to get there? 
Let’s dive in:\n\n## The benefits of being an elite team\n\nAccording to the DORA report there are specific things elite teams are able to consistently do. Here’s a look at some big goals:\n\n### Deploy more frequently\n\nElite performers deploy code 973 times more frequently than low performers, the survey notes. That’s right -- 973 times more. Low performers say they require a change lead time greater than six months. In sharp contrast, elite teams only need an hour. We’ll do the math for you: Elite teams have a 6,570 times faster lead time from commit to deploy than low performers.\n\n### Recover quicker\n\nThere’s a similar broad gap between low performers and elite teams when it comes to stability. DORA notes the time it takes the elite group to restore service is less than one hour, compared to more than six months for the low performers. \n\n### Lower change failure rates\n\nWhen it comes to change failure rates, there’s a 3 times difference between top and bottom performers. That means the elite group’s changes are a third less likely to fail. \n\n## DORA’s tips on how to become an elite team\n\nThose are great goals but how do you make them a reality? These six tips will take you in the right direction\n\n### 1. Make smart use of hybrid and multi-cloud environments\n\nDORA survey respondents who use either hybrid cloud or [multi-cloud](/topics/multicloud/) environments were 1.6 times more likely to beat their company’s performance targets than those who did not use these cloud setups. Multi-cloud users, for instance, say they are able to leverage each cloud provider’s unique benefits and achieve greater availability.\n\n### 2. How you implement the cloud matters\n\nWhen it comes to being able to support business needs, how the cloud is adopted and implemented makes a big difference. 
There’s a lot of benefit to adhering to the National Institute of Standards and Technology’s (NIST) [five essentials of cloud computing](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.500-291r2.pdf): on-demand self-service, broad network access, resource pooling, rapid elasticity or expansion, and measured service. DORA noted elite performers were 3.5 times more likely to have met all essential NIST cloud characteristics.\n\n### 3. Let DevOps and SRE complement each other\n\nTop DevOps professionals understand they don’t have to choose between DevOps and [site reliability engineering (SRE)](https://handbook.gitlab.com/job-families/engineering/infrastructure/site-reliability-engineer/). They work well together. “Elite performers are 2.1x as likely to report the use of SRE practices as their low-performing counterparts,” the DORA report notes. “But even teams operating at the highest levels have room for growth: Only 10% of elite respondents indicated that their teams have fully implemented every SRE practice we investigated.”\n\n### 4. Make sure you’re documenting\n\nThere’s a direct correlation between creating documents, which include everything from manuals to code comments, to a DevOps team’s success. Solid documentation is accurate, up-to-date, comprehensive, searchable, well organized and clear. The report points out that teams with good documentation are 2.4 times more likely to meet or exceed their reliability targets, and 2.5 times more likely to fully leverage the cloud.\n\n### 5. Build in security throughout development\n\nSecurity can get [a lot of lip service in DevOps](/blog/developer-security-divide/), but the best teams know that high delivery and operational performance are directly linked to integrating security practices throughout their development process. 
Security reviews must be integrated into every phase and applied to all major features, security professionals must be included in planning and development, and security testing must be automated.\n\n### 6. Pay attention to your team culture\n\nIn short, culture matters -- a lot. Industry surveys consistently show that culture is one of the top drivers of IT performance. Professionals who [have a sense of belonging and inclusion](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/), and who work collaboratively and cross-functionally, produce higher software delivery and operational performance. \n\nAccording to the DORA report, it’s clear that becoming an elite team of DevOps professionals is an attainable goal. The report notes a dramatic increase in the percentage of elite professionals this year: 26% (of 1,200 surveyed), up from just 7% in 2018. \n\nIt’s time to up your game or risk being left behind.\n\n_For a slightly different look at aspirational DevOps results, read our [2021 Global DevSecOps Survey](/developer-survey/)._\n\n",[9,813,3232],{"slug":5176,"featured":6,"template":686},"how-to-make-your-devops-team-elite-performers","content:en-us:blog:how-to-make-your-devops-team-elite-performers.yml","How To Make Your Devops Team Elite Performers","en-us/blog/how-to-make-your-devops-team-elite-performers.yml","en-us/blog/how-to-make-your-devops-team-elite-performers",{"_path":5182,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5183,"content":5189,"config":5196,"_id":5198,"_type":14,"title":5199,"_source":16,"_file":5200,"_stem":5201,"_extension":19},"/en-us/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two",{"title":5184,"description":5185,"ogTitle":5184,"ogDescription":5185,"noIndex":6,"ogImage":5186,"ogUrl":5187,"ogSiteName":670,"ogType":671,"canonicalUrls":5187,"schema":5188},"Bamboo Server to GitLab CI migration: Advanced techniques","A real-world look at how a migrated CI/CD 
infrastructure will work in GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679081/Blog/Hero%20Images/jenkins-migration.jpg","https://about.gitlab.com/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate Atlassian's Bamboo server's CI/CD infrastructure to GitLab CI, part two\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ivan Lychev\"}],\n        \"datePublished\": \"2022-07-11\",\n      }",{"title":5190,"description":5185,"authors":5191,"heroImage":5186,"date":5193,"body":5194,"category":791,"tags":5195},"How to migrate Atlassian's Bamboo server's CI/CD infrastructure to GitLab CI, part two",[5192],"Ivan Lychev","2022-07-11","\nIn [part one of our series](/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci/), I showed you how to migrate from Atlassian’s Bamboo Server to GitLab CI/CD. In this blog post we’re going to take a deep dive into how it works from a user’s perspective.\n\n## Get started\n\nYou’ve deployed the demo so it’s time to play with it to understand how it works.\n\nLet's imagine that one of the members of our project is John Doe. He is a software engineer responsible for developing some components (app1, app2, and app3) of the entire product, and he and his team would like to test those components in several combinations in myriad preview environments. So, what does that look like?\n\nFirst of all, let’s make some commits to the app1, app2, and app3 source code and get successful builds upon those commits.\n\nAfter that, we should create releases for those apps to be able to deploy them (as the deployment part of the apps CI config only shows when being triggered by a Git tag, i.e., a GitLab release). A release can be created by launching the last step (`manual-create-release`) in a commit pipeline. 
That would give us a new release with the ugly name containing the date and commit SHA in the patch part (in accord to `semver` scheme):\n\n\n\n![app_gitlab_release](https://about.gitlab.com/images/blogimages/app_gitlab_release.png)\n\nOn the `Tags` tab for the same app you now can see a deployment part of the pipeline has been triggered by the just created GitLab release but no actual environments to deploy are displayed (the `_` item in the `Deploy-nonprod` stage is not an env):\n\n\n![absent_envs](https://about.gitlab.com/images/blogimages/absent_envs.png)\n\n\n## Create an environment\n\nBut before that we have to briefly switch to another team who is responsible for preparing infrastructure IaC templates. Navigate to the `infra/environment-blueprints` project and pretend you are a member of that team doing their job. Namely, imagine you have just created some initial set of IaC files (they are already kindly prepared by me and present in the repository). You’ve tested them and now you feel that they are ready to be used by the other members of the project. You indicate such a readiness of a particular version of the IaC files by giving it a GitTag. Let’s put a tag like `v1.0.0` onto the HEAD version.\n\nYou will see how the tags are going to be used immediately. But first let's make some changes to the IaC files (e.g., add a new resource for some of the apps) and create a second Git tag, let's say `v1.1.0`. So, at this moment we have two versions of IaC templates (or `blueprints`) for our infrastructure - `v1.0.0` and `v1.1.0`.\n\n## Deploy an app into the environment\n\nNow we can return back to John and his team. We assume John is somehow informed that the version of the IaC templates he should use is `v1.0.0`. He wants to create a new preview environment out of the IaC templates of that version and put app1 and app2 into that env. \n\n(Here starts a description of how a user interoperates with the `infrastructure-set` Git repo. 
Notice that though the eventual idea is that it should be a Merge Request workflow – where you first get a Terraform plan within a Merge Request and can apply such a plan by merging the MR – which is widely advocated by GitLab but for the sake of simplicity here the MR workflow is not implemented and instead direct push commits into a branch are made).\n\nJohn wants the env to be named `preview-for-johns-team`. He creates a new branch in the `infrastructure-set` repo with that name and puts two files into it: a `version.txt` containing text `v1.0.0` and `apps.txt` with text `app1 app2` inside (the files format and its content is utterly simplified). \n\nThe `infrastructure-set` pipeline is triggered by the new branch and first generates a Terraform plan using the set of the Terraform files indicated by the tag specified in `version.txt`. John reviews the plan and wants to proceed with creating the environment by starting the `Terraform-apply` stage:\n\n\n![new_env_pipeline](https://about.gitlab.com/images/blogimages/new_env_pipeline.png)\n\n\n(To store the Terraform plan as artifact and Terraform state the embedded features of GitLab are leveraged - [Package Registry](https://docs.gitlab.com/ee/user/packages/package_registry/) and [Terraform HTTP back-end by GitLab](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html).)\n\nNow return to the `app1` project and rerun the pipeline for the app1 release we created previously to make it regenerate a list of environments to deploy. You should see that the `preview-for-johns-team` item has appeared in the list of the environments:\n\n\n![new_env_in_the_deploy_pipeline](https://about.gitlab.com/images/blogimages/new_env_in_the_deploy_pipeline.png)\n\n\nClick the arrow button to deploy. 
Then refer to the `Deployments/Environments` section of the `app1` project to ensure a new env with the app1 release deployed into it is displayed.\n\nWe have successfully created a new environment and deployed one of the apps into it!\n\nNotice that although the above describes how users manually deploy the applications into an environment after it has been created which doesn’t look really convenient, in a real life scenario we most likely would have some additional step in the `infrastructure-set` pipeline that runs after Terraform successfully finishes creating an environment and triggers deployment pipelines for all the applications specified in the `apps.txt`. In that situation, we would need to establish which versions of the applications should be deployed in such an automated manner - for example, those might be the latest versions available for each app or the versions currently deployed to production, etc.\n\n## Update an environment's infrastructure\n\nJohn got notified that a new version of the infrastructure templates is available (you remember that `v1.1.0` tag in the `environment-blueprints` repo?). His team wants to assess how app1 would work within the new conditions. They decide to update an existing env, namely `preview-for-johns-team`, for that purpose. \n\nJohn walks to the `preview-for-johns-team` branch of the `environment-set` repo and changes `version.txt`'s content from `v1.0.0` to `v1.1.0`. The branch pipeline gets triggered and first shows John a Terraform plan for a diff comparing the current state of the environment. After reviewing and accepting that diff, John proceeds with actual updating the environment by launching `Terraform-apply` stage. 
That's it!\n\n## Advantages and disadvantages\n\n### Virtues\n\nGiven that this case assumes migrating from some existing CI/CD infrastructure based on Atlassian Bamboo with a lot of users who are familiar with it, the proposed solution leverages the native capabilities of GitLab so that it mostly keeps the concepts and workflows used with Bamboo. This strategy makes the process of migration more smooth for the users.\n\nThe solution sticks to the GitOps tenets and empowers a project with all the virtues provided by Git. For example, it's usually easy to track any changes in the infrastructure back to Git repos. (It may not be so easy for the `environment-set` project where we do not have the infrastructure changes captured in Git commits, but in that case a task of finding differences between two states of a particular environment can be accomplished by fetching the two versions of the `environment-blueprints` repo corresponding to those states denoted in the `version.txt` and figuring out the differences by using any apt tool.)\n\nThe solution tends to support user self-service where most of the tasks of changing the infrastructure can be performed only by those familiar with the basics of Git and Terraform. As a result, it offloads the DevOps team from some part of the work and removes dependence on the Ops department which comes in really handy, especially for large-scale projects.\n\n### Shortcomings\n\nBesides the mentioned deficits which stem from the necessity to utterly simplify all the aspects of this demo to make it comprehensible and possible to prepare in a sensible amount of time, this solution possesses some shortcomings that have to be resolved by using external tools to make this solution appropriate for a real life usage.\n\nFor example, there is no way to have a central dashboard with an aggregated view of all the environments with all the apps and their versions deployed into the envs. 
This would require creating some custom SPA web app which would gather information from GitLab via API.\n",[109,9,978],{"slug":5197,"featured":6,"template":686},"how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two","content:en-us:blog:how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two.yml","How To Migrate Atlassians Bamboo Servers Ci Cd Infrastructure To Gitlab Ci Part Two","en-us/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two.yml","en-us/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two",{"_path":5203,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5204,"content":5210,"config":5215,"_id":5217,"_type":14,"title":5218,"_source":16,"_file":5219,"_stem":5220,"_extension":19},"/en-us/blog/how-to-move-from-ic-to-devops-manager-and-succeed",{"title":5205,"description":5206,"ogTitle":5205,"ogDescription":5206,"noIndex":6,"ogImage":5207,"ogUrl":5208,"ogSiteName":670,"ogType":671,"canonicalUrls":5208,"schema":5209},"How to move from IC to DevOps manager and succeed","Transitioning from great DevOps engineer to great DevOps manager isn't always easy. 
Here are some tools to help you get a management role and keep it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663753/Blog/Hero%20Images/managers-more-optimistic-than-developers.jpg","https://about.gitlab.com/blog/how-to-move-from-ic-to-devops-manager-and-succeed","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to move from IC to DevOps manager and succeed\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lauren Gibbons Paul\"}],\n        \"datePublished\": \"2022-03-01\",\n      }",{"title":5205,"description":5206,"authors":5211,"heroImage":5207,"date":5212,"body":5213,"category":679,"tags":5214},[1707],"2022-03-01","\nAs a seasoned [DevOps engineer](https://about.gitlab.com/topics/devops/what-is-a-devops-engineer/), an individual contributor (IC) role might eventually start to chafe. Here are 5 strategies to make the case that you're ready for a DevOps manager role, and 3 key things to keep in mind once you get there.\n\n## DevOps manager: More than just the title\n\nJust as many organizations don’t have dedicated DevOps teams – they just do DevOps – many will not have a title that sounds like “DevOps manager.” It is not uncommon for your current position to morph into a managerial role, so let your manager know you’re interested. Also, it never hurts to come into that conversation with a transition plan already in hand.\n\nUntil then, hone the skills that make for a good manager of any type – things like being a good communicator, mentoring others, and fostering collaboration. [Collaboration is a must-have](https://www.techrepublic.com/article/how-to-become-a-devops-manager-5-tips/#:~:text=A%20good%20DevOps%20manager%20encourages,learn%20and%20develop%2C%20Kromhout%20said) in a management role. 
\n\n## DevOps manager skills\n\nDevOps manager skills include deep technical expertise in at least one area, such as systems architecture, along with broad technical experience. Ideally, a manager will have the ability to program in multiple languages to give relevant feedback and better understand the tools and support team members need. You’ll also need to understand how to respond to security incidents. \n\n[Sharpening and adding to your technical skills](https://victorops.com/blog/being-a-devops-team-manager) – and learning new ones – are some of the best things you can do to make yourself more attractive as a potential DevOps manager. Not only will new skills help [advance your career](/blog/the-top-skills-you-need-to-get-your-devops-dream-job/), they’ll help your paycheck even if you decide to remain an individual contributor. And don’t forget the “impress your boss” benefits of being a [continuous learner](/blog/best-advice-for-your-devops-career-keep-on-learning/).\n\n## Understand the expectations (and implications) of management\n\nWhen examining DevOps manager roles and responsibilities, job number one is mediating the interpersonal skirmishes among team members and with other groups. Alone, that can be challenging enough, but it’s just the starting point. \n\nBefore you take on the role, make sure you understand what is involved. A DevOps manager will be expected to help set goals and timelines, oversee project management, obtain needed tools and skills, understand the work teams are doing, advocate for team interests within the wider organization, evangelize, and generally be a cheerleader for anyone who needs it. And don’t forget, [cheerleading is serious business](https://www.agileconnection.com/article/management-myth-11-team-needs-cheerleader) in many organizations. A DevOps manager also needs a good network, and to be able to bring people onboard to fill skills gaps.  
\n\n## Find a mentor\n\nMany companies offer mentorship programs, including [GitLab](/handbook/people-group/learning-and-development/mentor/), and they can be a tremendous resource for someone looking to grow into a management role. A mentor doesn't have to be a technology leader – learning management from someone in marketing, sales, or finance is useful as well.\n\n## Volunteer for an interim role\n\nWhether it's the \"great resignation\" or simply the usual tech churn, turnover can mean teams need \"interim\" leaders. Being an interim manager can be an opportunity to get your feet wet, help out in an area that might not be completely familiar, and show your willingness to stretch, learn new things, and be a true team player. Obviously, many interim roles don't turn into permanent ones, but they still offer experience that can help build a case for a promotion to management. \n\n## You're a manager now\n\nOnce you’ve stepped into a DevOps manager role, here are three ways to be successful:\n\n1. **Lead the change**. The concept of DevOps obviously means that development and operations are working together, but it also requires working closely with other functional areas with a culture of openness. Good managers break down organizational silos and help people assimilate and embrace the changes needed for successful DevOps. The best DevOps managers are able to bridge communication gaps, tearing down the walls between functions – especially developers, IT operations, and security – and strive to instill a sense of [shared purpose and empathy](https://www.toptal.com/devops/bridging-gaps-devops-communication#:~:text=What%20is%20DevOps%20in%20simple,using%20common%20processes%20and%20tools).\n\n2. **Focus on the processes and the metrics**. A successful DevOps manager is able to toggle quickly between personnel and process. 
Fine-tuning the [CI/CD pipelines](/topics/ci-cd/), test automation, multi-cloud options, and cutting-edge technology choices like Kubernetes and AI/ML will require a continuous improvement mentality and a serious reliance on metrics. If you can’t measure performance, it’s tough to improve it. Also, by focusing on incremental performance increases, a DevOps manager not only increases development velocity, but is in a good place [to plan for the future](https://www.techopedia.com/devops-managers-explain-what-they-do/2/33379).  \n\n3. **Don’t overlook training for the team**. Technical skills are the lifeblood of the DevOps team, and they need constant updating. But most people feel they are too busy to take time for training [and some of it may not be particularly compelling](https://www.agileconnection.com/article/management-myth-9-we-have-no-time-training). Your challenge as a DevOps manager is to first convince your managers that the training is justified and then to persuade your team to make time for it. Find the right kind of training and offer it to the people who need it, when they need it. Delivering learning in chunks of five to 10 minutes, also known as microlearning, has been proven much more engaging for employees and drives retention. 
So, look for training employees can schedule and do on their own time and terms – and ideally [via mobile devices](https://elearningindustry.com/microlearning-vs-macrolearning-for-corporate-training#:~:text=According%20to%20research%2C%20microlearning%20is,bringing%20learning%20to%20the%20employees).\n",[9,813,749],{"slug":5216,"featured":6,"template":686},"how-to-move-from-ic-to-devops-manager-and-succeed","content:en-us:blog:how-to-move-from-ic-to-devops-manager-and-succeed.yml","How To Move From Ic To Devops Manager And Succeed","en-us/blog/how-to-move-from-ic-to-devops-manager-and-succeed.yml","en-us/blog/how-to-move-from-ic-to-devops-manager-and-succeed",{"_path":5222,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5223,"content":5229,"config":5234,"_id":5236,"_type":14,"title":5237,"_source":16,"_file":5238,"_stem":5239,"_extension":19},"/en-us/blog/how-to-protect-gitlab-connected-ssh-key-with-yubikey",{"title":5224,"description":5225,"ogTitle":5224,"ogDescription":5225,"noIndex":6,"ogImage":5226,"ogUrl":5227,"ogSiteName":670,"ogType":671,"canonicalUrls":5227,"schema":5228},"How to protect GitLab-connected SSH key with Yubikey","Add a layer of security to SSH keys by restricting physical access to YubiKey.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667202/Blog/Hero%20Images/gitlabultimatesecurity.jpg","https://about.gitlab.com/blog/how-to-protect-gitlab-connected-ssh-key-with-yubikey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to protect GitLab-connected SSH key with Yubikey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-03-03\",\n      }",{"title":5224,"description":5225,"authors":5230,"heroImage":5226,"date":5231,"body":5232,"category":791,"tags":5233},[766],"2022-03-03","\n[Two-factor authentication](https://docs.gitlab.com/ee/security/two_factor_authentication.html) is 
one of the best defenses we have as individuals for protecting our accounts and credentials. But not all 2FA methods are created equal. For example, SMS is vulnerable to [SIM-swapping](https://www.ic3.gov/Media/Y2022/PSA220208) attacks and thus doesn't always provide the extra security we would like.\n\nIdeally, everything I  want to connect to would use 2FA with dedicated 2FA hardware. With GitLab 14.8, you can now use 2FA hardware to protect your SSH keys, as I explain below.  \n\n## 2FA and SSH keys\n\nState-of-the-art 2FA uses a physical hardware device – often FIDO/U2F hardware – to verify your presence at the time of authentication. This provides two distinct factors as a means of authentication: something you know (your username and password, for instance) with something you have (the physical device). I have two [YubiKey](https://www.yubico.com/works-with-yubikey/catalog/gitlab/) devices that I use for this purpose – one that is always in a safe in my house and one that I generally keep with me and the computer I'm using to do work. And I have everything I can secure using this method, including my GitLab account.\n\nAnd that does a great job of securing my access to GitLab, the application front end, and the ability to create and modify API keys. But there is another way to authenticate to a git server: SSH keys. In this case, there's only one factor of authorization because the SSH key is on my computer. So you can imagine how excited I was to hear that GitLab added support for `ecdsa-sk` and `ed25519-sk` key types in [GitLab 14.8](/releases/2022/02/22/gitlab-14-8-released/#support-for-ecdsa-sk-and-ed25519-sk-ssh-keys).\n\n### What are `ecdsa-sk` and `ed25519-sk`?\n\nThese two new keys are close to the existing `ecdsa` (Elliptic Curve Digital Signature Algorithm) and `ed25519` (Edwards Curve Digital Signature Algorithm) keys already supported. But that `-sk` at the end adds the ability to verify the key with a FIDO/U2F device. 
\"SK\" here stands for \"security key\". [OpenSSH 8.2](https://www.openssh.com/txt/release-8.2) added this key type to the supported keys it can generate, interacting with the hardware device to authenticate user presence before allowing the key to be used.\n\nHowever, I still had a few things to do to be ready to use the new keys.\n\n## Updating OpenSSH \nMy daily driver computer is a 2021 iMac running macOS Big Sur version 11.6. When I ran to it to generate this new key, I encountered a problem. Supposedly my version of SSH didn't support `-sk` keys!\n\nNow, your mileage may vary here, but I was able to update the version of SSH my Mac uses by default by first running `brew install openssh`, which successfully installed OpenSSH 8.8. But when I ran `ssh -V` it still showed version 8.1. So how could I get the system to use the newly installed OpenSSH instead?\n\nThe easiest way I could think of to do that was to put the Homebrew version first in the $PATH variable. But where is that path? Luckily, I was able to find that (`/opt/homebrew/opt/openssh`) by running this command:\n\n`brew --prefix openssh`\n\nOnce I updated my $PATH variable to have that at the front, I got the desired outcome:\n\n```bash\n$  which ssh\n/opt/homebrew/opt/openssh/bin/ssh\n\n$ ssh -V\nOpenSSH_8.8p1, OpenSSL 1.1.1m  14 Dec 2021\n```\n\n## Generating the key\nNow that I was using the correct version of SSH, I was able to create my `ecdsa-sk` key by running: \n\n```bash\nssh-keygen -t ecdsa-sk -f ~/.ssh/id_ecdsa_sk\n```\n\nNow, the specific device I have only supports ECDSA and not EdDSA, which is why I went with `ecdsa-sk`. 
There also is an option to have the key reside ON the device itself (if supported by your hardware) with the `-O resident` flag like this:\n\n```bash\n$ ssh-keygen -t ecdsa-sk -O resident -f ~/.ssh/id_ecdsa_sk\n\nEnter PIN for authenticator:\nYou may need to touch your authenticator (again) to authorize key generation.\nEnter passphrase (empty for no passphrase):\nEnter same passphrase again:\nYour identification has been saved in /Users/brendan/.ssh/id_ecdsa_sk\nYour public key has been saved in /Users/brendan/.ssh/id_ecdsa_sk.pub\n```\n\nGenerating a resident key will make sharing this key with a new computer if and when that happens much easier. If you have a YubiKey like me, you can set the FIDO2 PIN using the [YubiKey Manager](https://www.yubico.com/support/download/yubikey-manager/) software.\n\n## Adding the key to GitLab\nNow that I had the complex parts covered, all that was left was to add the key to GitLab. I went to my [SSH settings](https://gitlab.com/-/profile/keys) on GitLab.com and (bravely) deleted my old SSH key and added the `.pub` public part of my key to my profile.\n\nAnd it was that simple! 
Now every time I go to interact with GitLab.com, I'm prompted to confirm my presence by touching the YubiKey device attached to my computer:\n\n```bash\ngit clone git@gitlab.com:brendan/website.git\nCloning into 'website'...\nConfirm user presence for key ECDSA-SK SHA256:OZSZGwbnnbc...\n\ngit add .\ngit commit -m \"A new commit\"\ngit push\nConfirm user presence for key ECDSA-SK SHA256:OZSZGwbnnbc...\n```\n\nThat small but essential change gives me peace of mind that even if someone could somehow get my private SSH key, I would still be protected by having physical access restricted to my YubiKey.\n\n",[9,875,978],{"slug":5235,"featured":6,"template":686},"how-to-protect-gitlab-connected-ssh-key-with-yubikey","content:en-us:blog:how-to-protect-gitlab-connected-ssh-key-with-yubikey.yml","How To Protect Gitlab Connected Ssh Key With Yubikey","en-us/blog/how-to-protect-gitlab-connected-ssh-key-with-yubikey.yml","en-us/blog/how-to-protect-gitlab-connected-ssh-key-with-yubikey",{"_path":5241,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5242,"content":5248,"config":5255,"_id":5257,"_type":14,"title":5258,"_source":16,"_file":5259,"_stem":5260,"_extension":19},"/en-us/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler",{"title":5243,"description":5244,"ogTitle":5243,"ogDescription":5244,"noIndex":6,"ogImage":5245,"ogUrl":5246,"ogSiteName":670,"ogType":671,"canonicalUrls":5246,"schema":5247},"How to protect your source code with GitLab and Jscrambler","Learn how to seamlessly protect your source code at build time in just a few steps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669529/Blog/Hero%20Images/gitlab-jscrambler-blog-post-protecting-source-code.png","https://about.gitlab.com/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to protect your source code with GitLab and 
Jscrambler\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pedro Fortuna\"},{\"@type\":\"Person\",\"name\":\"Sam Kerr\"}],\n        \"datePublished\": \"2021-06-09\",\n      }",{"title":5243,"description":5244,"authors":5249,"heroImage":5245,"date":5252,"body":5253,"category":791,"tags":5254},[5250,5251],"Pedro Fortuna","Sam Kerr","2021-06-09","\nDevelopment teams are building, testing, and shipping code faster than ever before. Today, we know that security has a role to play at the early stages of the [DevOps workflow](/topics/devops/), but these security controls are mostly centered around finding and fixing bugs and vulnerabilities during development.\n\nIn this tutorial, we will explore the importance of protecting client-side application code at runtime and guide you through implementing it in your GitLab instance using the integration with [Jscrambler](https://jscrambler.com/).\n\n## The importance of runtime code protection\n\nWith web and mobile applications dealing with increasingly sensitive data, addressing the application's attack surface requires considering additional threats that are not directly linked to vulnerabilities.\n\nThis concern has been widely covered in NIST, ISO 27001, and some of the latest iterations of OWASP guides, such as the [Mobile Application Security Verification Standard](https://mobile-security.gitbook.io/masvs/). 
These information security standards highlight that attackers who gain unwarranted access to the application's source code may be able to retrieve proprietary code, find ways to bypass app restrictions, and make more progress while planning/automating data exfiltration attacks.\n\nAs such, it's important that companies implement an additional security layer (on top of application security best practices) to tackle the threats of tampering and reverse engineering of an application's source code.\n\n## Getting started with Jscrambler + GitLab\n\nA robust code protection approach must include multiple layers to raise the bar for reverse-engineering and tampering attempts. Jscrambler achieves this by using a combination of code protection techniques, including obfuscation, code locks, runtime protection, and threat monitoring.\n\nLet's see how you can easily set up this layered source code protection using Jscrambler in your GitLab instance.\n\n### What you need for the Jscrambler integration\n\nTo use this integration with Jscrambler, make sure that you meet the following prerequisites:\n\n* A JavaScript-based project, as Jscrambler can protect JavaScript-based web and hybrid mobile apps\n* A [Jscrambler account](https://jscrambler.com/signup)\n* A GitLab instance where the Jscrambler integration will run\n\n### How to configure Jscrambler\n\nThe first step of this integration is to define the Jscrambler code protection techniques you want to use. The best way to do this is through the [Jscrambler web app](https://app.jscrambler.com/). You can either select one of the pre-defined templates or pick techniques one by one. Review [the Jscrambler guide](https://blog.jscrambler.com/jscrambler-101-first-use/) for further instructions on choosing Jscrambler techniques. 
No matter what you choose, download Jscrambler's JSON configuration file by clicking the download button next to the Application Settings, as shown below.\n\n![Jscrambler_download_JSON](https://about.gitlab.com/images/blogimages/jscrambler-app-download-json.gif \"How to download Jscrambler's JSON config.\")\nHow to download Jscrambler's JSON config.\n{: .note.text-center}\n\nPlace the file you just downloaded in your project's root folder and rename it to `.jscramblerrc`. Now, open the file and make sure you remove the access and secret keys from this configuration file by removing the following lines.\n\n```json\n \"keys\": {\n   \"accessKey\": \"***********************\",\n   \"secretKey\": \"***********************\"\n },\n```\n\nThis will prevent having hardcoded API keys, which could pose security issues. You should store these API keys using the [GitLab CI environment variables](https://docs.gitlab.com/ee/ci/variables/), as shown below.\n\n![Jscrambler API keys as GitLab environment variables](https://docs.jscrambler.com/637a78d94e016c8be1866edb0627f2bc.png)\nWhere to score Jscrambler's API keys in GitLab.\n{: .note.text-center}\n\nAnd that's all you need from Jscrambler's side!\n\n### Configuring a Jscrambler job inside GitLab CI\n\nStart by checking you have placed the `.gitlab-ci.yml` file at the root of your project. Inside this file, you will need to define your `build` stage, as well as add a new `protect` stage, as shown below.\n\n```yml\nstages:\n - build\n - protect\n # - deploy\n # ...\n```\n\nThe `build` stage should be configured as follows:\n\n```yml\nbuild:production:\n stage: build\n artifacts:\n   when: on_success\n   paths:\n     - build\n script:\n   - npm i\n   - npm run build\n```\n\nThis configuration will run the `npm run build` command, which is a standard way of building your app to production, placing the resulting production files in the `/build` folder. 
Plus, it ensures that the `/build` folder becomes available as a [GitLab CI artifact](https://docs.gitlab.com/ee/ci/pipelines/job_artifacts.html) so that it can be used later in other jobs.\n\nHere, make sure that you set the build commands and build folder according to your own project, as these may vary.\n\nNext, configure the `protect` stage as shown below:\n\n```yml\nbuild:production:obfuscated:\n stage: protect\n before_script:\n   - npm i -g jscrambler\n dependencies:\n   - build:production\n artifacts:\n   name: \"$CI_JOB_NAME\"\n   when: on_success\n   paths:\n     - build\n   expire_in: 1 week\n script:\n   # By default, all artifacts from previous stages are passed to each job.\n   - jscrambler -a $JSCRAMBLER_ACCESS_KEY -s $JSCRAMBLER_SECRET_KEY -o ./ build/**/*.*\n```\n\nThis stage starts by installing the Jscrambler npm package globally. Next, it is configured to execute Jscrambler at the end of each new production build process. Typically, you will want to ensure that Jscrambler is the last stage of your build process, because Jscrambler transforms the source code extensively and can also add [anti-tampering protections](https://docs.jscrambler.com/code-integrity/documentation/transformations/self-defending). This means changing the files after they have been protected by Jscrambler may break the app functionality.\n\nThis `protect` stage is configured to access the Jscrambler API keys that have been loaded as GitLab environment variables. Finally, the output of the protection is placed into the same `/build` folder and made available as a GitLab CI artifact for posterior use (e.g., a deploy job).\n\nNote that while this example shows how to use the Jscrambler CLI client to protect the code, Jscrambler is compatible with [other clients](https://docs.jscrambler.com/code-integrity/documentation/api/clients), such as Grunt, Gulp, webpack, Ember, and Metro (React Native).\n\nAnd, that's all there is to it! 
You can configure your `deploy` stage as usual, which should access the contents of the `build/` folder and ensure your protected files are available in a live production environment.\n\n### Checking the protection result\n\nAs a final (optional) step, you might want to check the live app and see what its source code looks like. You can do that easily by using a browser debugger and opening the files from the \"Sources\" tab. The protected code should look completely unintelligible, similar to the one shown below.\n\n![Source code protected by Jscrambler](https://i.imgur.com/HXLZyFh.png)\nExample of murky source code protected by Jscrambler.\n{: .note.text-center}\n\nJust bear in mind that, in case you are using Jscrambler's anti-debugging transformations, your browser debugger will likely crash or derail the app execution. This is intended behavior, which is very useful to prevent reverse-engineering of the code.\n\n## Final thoughts\n\nAs we saw in this tutorial, setting up this integration between Jscrambler and GitLab is very straightforward. It introduces a new `protect` stage where the JavaScript source code is protected by Jscrambler before deployment.\n\nJscrambler goes well beyond JavaScript obfuscation since it provides runtime protection techniques such as [self defending](https://docs.jscrambler.com/code-integrity/documentation/transformations/self-defending) and [self healing](https://docs.jscrambler.com/code-integrity/documentation/transformations/self-healing), which provide anti-tampering and anti-debugging capabilities, as well as [code locks](https://docs.jscrambler.com/code-integrity/documentation/client-side-countermeasures). For more details about Jscrambler transformations, review [Jscrambler's documentation page](https://docs.jscrambler.com/).\n\n## Watch the demo\n\nMore of a video person? 
Watch the demo on how to protect your source code using GitLab and Jscrambler.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/aBx2Vtbe-1w\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[9,875,728],{"slug":5256,"featured":6,"template":686},"how-to-protect-your-source-code-with-gitlab-and-jscrambler","content:en-us:blog:how-to-protect-your-source-code-with-gitlab-and-jscrambler.yml","How To Protect Your Source Code With Gitlab And Jscrambler","en-us/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler.yml","en-us/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler",{"_path":5262,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5263,"content":5269,"config":5277,"_id":5279,"_type":14,"title":5280,"_source":16,"_file":5281,"_stem":5282,"_extension":19},"/en-us/blog/how-to-provision-reviewops",{"title":5264,"description":5265,"ogTitle":5264,"ogDescription":5265,"noIndex":6,"ogImage":5266,"ogUrl":5267,"ogSiteName":670,"ogType":671,"canonicalUrls":5267,"schema":5268},"Deploying dynamic review environments with MRs and Argo CD","Here's how to use the Argo CD ApplicationSet to provision a ‘ReviewOps’ environment based on merge request changes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681920/Blog/Hero%20Images/kubernetes.png","https://about.gitlab.com/blog/how-to-provision-reviewops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to provision dynamic review environments using merge requests and Argo CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joe Randazzo\"},{\"@type\":\"Person\",\"name\":\"Madou Coulibaly\"}],\n        \"datePublished\": \"2022-08-02\",\n      
}",{"title":5270,"description":5265,"authors":5271,"heroImage":5266,"date":5274,"body":5275,"category":791,"tags":5276},"How to provision dynamic review environments using merge requests and Argo CD",[5272,5273],"Joe Randazzo","Madou Coulibaly","2022-08-02","\nWe recently learned of a new contribution to the ApplicationSet in the Argo CD project, specifically the [Pull Request generator for GitLab](https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/applicationset/Generators-Pull-Request.md#gitlab) and decided to take it for a spin. What makes this interesting is now dynamic [review environments](https://docs.gitlab.com/ee/ci/review_apps/index.html) can be provisioned intuitively from the merge request (MR) using a [GitOps](/topics/gitops/) workflow. The benefit is code reviewers or designers can quickly review any app changes to your Kubernetes cluster all from within the merge request.\n\nIn traditional testing workflows, you may have pushed your changes into a development environment, waiting for the QA and UX team to pull those changes into their environment for further review, and then received feedback based on your small change. At this point, time was wasted between various teams with environment coordination or adding bugs to the backlog of the new changes. \n\nWith the combination of a merge request and review environments, you can quickly spin up a test environment based on the changes of your feature branch. 
This means the QA or UX team can suggest improvements or changes during the code review process without wasting cycles.\n\nThe introduction of the ApplicationSet has given greater flexibility to Argo CD workflows such as:\n\n- Allowing unprivileged cluster users to deploy applications (without namespace access)\n- Deploying applications to multiple clusters at once\n- Deploying many applications from a single monorepo\n- **And triggering review environments based on a pull request**\n\n### Let's review the ApplicationSet and the GitLab Pull Request Generator\n\nThe [Pull Request Generator](https://argo-cd.readthedocs.io/en/latest/operator-manual/applicationset/Generators-Pull-Request) will use the GitLab API to automatically discover new merge requests within a repository. Depending on the filter match of the MR, a review environment will then be generated.\n\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: ApplicationSet\nmetadata:\n  name: review-the-application\n  namespace: argocd\nspec:\n  generators:\n  - pullRequest:\n      gitlab:\n        project: \u003Cproject-id>\n        api: https://gitlab.com/\n        tokenRef:\n          secretName: \u003Cgitlab-token>\n          key: token\n        pullRequestState: opened\n      requeueAfterSeconds: 60\n  template:\n    metadata:\n      name: 'review-the-application-{{number}}'\n    spec:\n      source:\n        repoURL: \u003Crepository-with-manifest-files>\n        path: chart/\n        targetRevision: 'HEAD'\n        helm:\n          parameters:\n          - name: \"image.repository\"\n            value: \"registry.gitlab.com/\u003Cgroup-and-project-path>/{{branch}}\"\n          - name: \"image.tag\"\n            value: \"{{head_sha}}\"\n          - name: \"service.url\"\n            value: \"the-application-{{number}}.\u003Cip>.nip.io\"\n      project: default\n      destination:\n        server: https://kubernetes.default.svc\n        namespace: dynamic-environments-with-argo-cd\n```\n#### Fields\n\n* 
`project`: The GitLab Project ID\n* `api`: URL of GitLab instance\n* `tokenRef`: The secret to monitor merge request changes\n* `labels`: Provision review environments based on a GitLab label\n* `pullRequestState`: Provision review environments based on [MR states](https://docs.gitlab.com/ee/api/merge_requests.html)\n\nFilter options include GitLab labels, merge request state (open, closed, merged), and branch match. Templating options include merge request ID, branch name, branch slug, head sha, and head short sha.\n\nSee the latest [ApplicationSet documentation](https://argo-cd.readthedocs.io/en/latest/operator-manual/applicationset/Generators-Pull-Request/#gitlab) for additional details.\n\nFor this blog post, we explore using the Argo CD ApplicationSet to provision a “ReviewOps” environment based on merge request changes.\n\n### Prerequisites\n\nThe following tools are required for running this tutorial. Please install and/or configure them before getting started.\n\n- **Tools**\n  - GitLab v15.0+ \n  - Kubernetes cluster v1.21+\n  - Argo CD 2.5.0+\n- **CLI**\n  - kubectl v1.21+\n\n### Explore the Source Code\n\nFirst, let’s explore the [source code](https://gitlab.com/madou-stories/dynamic-environments-with-argo-cd) for the tutorial.\n\nThis GitLab group is composed of the 2 following projects:\n\n- `The Application`: contains the source code of a containerized application and its CI/CD pipeline\n- `The Application Configuration`: contains the application configuration (Kubernetes Manifests) managed by Helm\n\n![git-repository](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/git-repository.png)\n\n### Setting up GitLab\n\n1. Create your GitLab Group and fork the [The Application](https://gitlab.com/madou-stories/dynamic-environments-with-argo-cd/the-application) and [The Application Configuration](https://gitlab.com/madou-stories/dynamic-environments-with-argo-cd/the-application-configuration) projects into it.\n\n2. 
In `The Application Configuration` project, edit the `**manifests/applicationset.yml**` as follows:\n\n  * `.spec.generators.pullRequest.gitlab.project`: The Project ID of `The Application`\n  * `.spec.template.spec.source.repoURL`: Git URL of `The Application Configuration`\n  * `.spec.template.spec.source.helm.parameters.\"image.repository\"`: Point to image repository, for example `registry.gitlab.com/\u003CYour_GitLab_Group>/the-application/{{branch}}`\n\n  Note: keep the {{branch}} string as is and replace \u003CYour_GitLab_Group> with the name of the group you created in step 1.\n\n  * `.spec.template.spec.source.helm.parameters.\"service.url\"`: Templated with `the-application-{{number}}.\u003CYour_Kube_Ingress_Base_Domain>`\n\n  Note: keep the {{number}} string as is and replace \u003CYour_Kube_Ingress_Base_Domain> with the base domain of your Kubernetes Cluster.\n\n3. Define the following CI/CD variables at the group level:\n\n   - `ARGOCD_SERVER_URL`, the Argo CD server address\n   - `ARGOCD_USERNAME`, the username of your Argo CD account\n   - `ARGOCD_PASSWORD`, the password of your Argo CD account\n   - `KUBE_INGRESS_BASE_DOMAIN`, the base domain of your Kubernetes Cluster\n\n   ![cicd-variables](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/cicd-variables.png)\n\n4. Generate a Group access token to grant `read_api` and `read_registry` access to this group and its sub-projects.\n\n   ![group-access-token](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/group-access-token.png)\n\n   Save the group access token somewhere safe. We will use it later.\n\n### Setting up Kubernetes\n\n1. Create a namespace called `dynamic-environments-with-argo-cd`.\n   ```shell\n   kubectl create namespace dynamic-environments-with-argo-cd\n   ```\n2. 
Create a Kubernetes secret called `gitlab-token-dewac` to allow Argo CD to use the GitLab API.\n   ```shell\n   kubectl create secret generic gitlab-token-dewac -n argocd --from-literal=token=\u003CYour_Access_Token>\n   ```\n3. Create another Kubernetes secret called `gitlab-token-dewac` to allow Kubernetes to pull images from the GitLab Container Registry.\n   ```shell\n   kubectl create secret generic gitlab-token-dewac -n dynamic-environments-with-argo-cd --from-literal=token=\u003CYour_Access_Token>\n   ```\n\n### Setting up Argo CD\n\n1. Create the Argo CD ApplicationSet to generate an Argo CD Application associated with a merge request.\n   ```shell\n   kubectl apply -f https://gitlab.com/\u003CYour_GitLab_Group>/the-application-configuration/-/raw/main/manifests/applicationset.yaml\n   ```\n\n### Update the source code\n\n1. In `The Application` project, create a GitLab issue, then an associated branch and merge request. \n2. In Argo CD, a new application is provisioned called `review-the-application` based on the new merge request event.\n\n   ![review-the-application-argocd](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/review-the-application-argocd.png)\n\n3. In `The Application` project, edit the `index.pug` and replace `p Welcome to #{title}`  with `p Bienvenue à #{title}`.\n4. Commit into your recent branch which is going to trigger a pipeline run.\n5. In the CI/CD > Pipelines, you will find the following pipeline running on your merge request:\n\n   ![feature-branch-pipeline](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/feature-branch-pipeline.png)\n\n   where,\n\n   - `docker-build`: builds the container image\n   - `reviewops`: configures and deploys the container into the review environment using Argo CD\n   - `stop-reviewops`: deletes the review environment\n\n6. 
Once completed, the `review-the-application` application in Argo CD is now synced.\n\n   ![review-the-application-synced](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/review-the-application-synced.png)\n\n7. From the merge request, click on the `View app` button to access to your application.\n\n   ![view-app-button](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/view-app-button.png)\n\n   The outcome should be as follows:\n\n   ![express-app](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/express-app.png)\n\n8. You have succesfully provisioned a dynamic review environment based on your merge request! Once the merge request is closed, the environment will be automatically cleaned up.\n\n## To sum up\n\nHopefully this tutorial has been helpful and has inspired your GitLab + Argo CD workflows with review environments.\n\nWe'd love to hear in the comments on how this is working for you, as well as your ideas on how we can make GitLab a better place for GitOps workflows.\n",[771,534,9],{"slug":5278,"featured":6,"template":686},"how-to-provision-reviewops","content:en-us:blog:how-to-provision-reviewops.yml","How To Provision Reviewops","en-us/blog/how-to-provision-reviewops.yml","en-us/blog/how-to-provision-reviewops",{"_path":5284,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5285,"content":5291,"config":5295,"_id":5297,"_type":14,"title":5298,"_source":16,"_file":5299,"_stem":5300,"_extension":19},"/en-us/blog/how-to-security-as-code",{"title":5286,"description":5287,"ogTitle":5286,"ogDescription":5287,"noIndex":6,"ogImage":5288,"ogUrl":5289,"ogSiteName":670,"ogType":671,"canonicalUrls":5289,"schema":5290},"Why implementing security as code is important for DevSecOps","We created a DevSecOps assessment to help your company level up its DevSecOps 
capabilities.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663618/Blog/Hero%20Images/how-to-implement-security-as-code.jpg","https://about.gitlab.com/blog/how-to-security-as-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why implementing security as code is important for DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2020-03-12\",\n      }",{"title":5286,"description":5287,"authors":5292,"heroImage":5288,"date":3036,"body":5293,"category":679,"tags":5294},[1016],"\n## What is security as code?\n\nSecurity as code is a driving force in the future of [application security](/topics/devsecops/).\nAccording to O’Reilly, [security as code is the practice of building security\ninto DevOps tools and workflows](https://www.oreilly.com/library/view/devopssec/9781491971413/ch04.html) by mapping out how changes to code and infrastructure\nare made and finding places to add security checks, tests, and gates without\nintroducing unnecessary costs or delays.\nDevelopers can define infrastructure using a\nprogramming language with infrastructure as code. The same needs to happen to bring security to the speed of DevOps.\n\nAt a basic level, security as code can be achieved by integrating security\npolicies, tests, and scans into the pipeline and code itself. Tests should be\nrun automatically on every code commit, with results made immediately available\nto developers for fixing. By bringing security scans to the code as it’s written,\nteams will save both time and money by streamlining the review process later in\nthe software development lifecycle (SDLC).\n\n## Why is it important?\n\nSecurity as code is key to shifting left and achieving [DevSecOps](/solutions/security-compliance/): It requires\nthat security be defined at the beginning of a project and codified for\nrepeated and consistent use. 
In this way, it gives developers a self-service\noption for ensuring their code is secure.\n\nPredefined security policies boost efficiency, and also allow for checks on\nautomated processes to prevent any mishaps in the deployment process (like\naccidentally taking down the whole infrastructure because a problem wasn’t\nidentified in a staging environment).\n\n## Six security as code capabilities to prioritize\n\nFrancois Raynaud, founder and managing director of [DevSecCon](https://www.devseccon.com/),\nsaid that [security as code is about making security more transparent and\ngetting security practitioners and developers to speak the same language](https://techbeacon.com/devops/devseccon-security-code-secure-devops-techniques-track).\nIn other words – security teams need to understand how developers work, and use that\ninsight to help developers build the necessary security controls into the SDLC.\nDevelopers can reciprocate by staying open-minded as they adopt new tools and\npractices to boost security during the development process. Here are six best\npractices and capabilities to build into your pipeline:\n\n1. Automate security scans and tests (such as [static analysis](https://docs.gitlab.com/ee/user/application_security/sast/),\n[dynamic analysis](https://docs.gitlab.com/ee/user/application_security/dast/),\nand penetration testing) within your pipeline so that they can be reused across\nall projects and environments.\n1. Build a continuous feedback loop by presenting results to developers, allowing\nthem to remediate issues while coding and learn best practices during the coding\nprocess.\n1. Evaluate and monitor automated security policies by building checks into the\nprocess. Verify that sensitive data and secrets are not inadvertently shared or published.\n1. Automate complex or time-consuming manual tests via custom scripts, with\nhuman sign-off on results if necessary. 
Validate the accuracy and efficiency of\ntest scripts so that they can be replicated across different projects.\n1. Test new code within a staging environment to allow for thorough security and\nlow-impact failure, and test on every code commit.\n1. Scheduled or continuous monitoring should automatically create logs (or red\nflags) within a review dashboard (such as GitLab’s [Security Dashboard feature](https://docs.gitlab.com/ee/user/application_security/security_dashboard/index.html)).\n\n## Security as code is a best practice for a bigger goal\n\nSecurity as code gives pragmatic meaning to the concept of DevSecOps, but it\nshould not be your end goal. Ultimately, security as code is a means to get more people on board with integrating security throughout your\nSDLC. The idea will feel familiar to developers who\nhave practiced infrastructure as code, and it provides an opportunity for\nsecurity to step into the fray both to better understand software development\nand to help design the policies that will be codified in the process.\n\nAs your team works its way toward becoming a well-oiled DevSecOps machine,\nsecurity as code will inevitably present itself as a smart solution within a complex endeavor.\n\n## GitLab’s DevSecOps methodology assessment\n\nThere’s a lot to cover when standing up a DevSecOps process – so to help you\nmaster the key elements, we created a DevSecOps methodology assessment. Score\nyourself on 20 capabilities, and then use those scores to understand your DevSecOps\nmaturity level, and determine what actions your team can take to bring your DevSecOps to\nthe next level. 
[Download the assessment here.](https://about.gitlab.com/resources/devsecops-methodology-assessment/)\n\nCover image by [Tim Evans](https://unsplash.com/@tjevans) on [Unsplash](https://unsplash.com/photos/Uf-c4u1usFQ)\n{: .note}\n",[9,875,109,683],{"slug":5296,"featured":6,"template":686},"how-to-security-as-code","content:en-us:blog:how-to-security-as-code.yml","How To Security As Code","en-us/blog/how-to-security-as-code.yml","en-us/blog/how-to-security-as-code",{"_path":5302,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5303,"content":5309,"config":5314,"_id":5316,"_type":14,"title":5317,"_source":16,"_file":5318,"_stem":5319,"_extension":19},"/en-us/blog/how-to-stand-up-gitlab-in-awsmp",{"title":5304,"description":5305,"ogTitle":5304,"ogDescription":5305,"noIndex":6,"ogImage":5306,"ogUrl":5307,"ogSiteName":670,"ogType":671,"canonicalUrls":5307,"schema":5308},"How to stand-up a GitLab instance in AWS Marketplace","This is a quick guide to help you provision a GitLab instance in the AWS Marketplace and set up a Runner.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682043/Blog/Hero%20Images/awsmp.png","https://about.gitlab.com/blog/how-to-stand-up-gitlab-in-awsmp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to stand-up a GitLab instance in AWS Marketplace\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2021-06-30\"\n      }",{"title":5304,"description":5305,"authors":5310,"heroImage":5306,"date":5311,"body":5312,"category":791,"tags":5313},[2120],"2021-06-30","\n\n## In this guide we will learn how to spin up GitLab in the AWS Marketplace:\n\n### Prerequisites for this lab are having an account in AWS and an accessible and working VPC.\n\n### We will learn the following steps:\n\n1. Stand up a self-managed instance of GitLab.\n2. 
Install Runner and Docker Engine.\n\n\n## Step-by-step Instructions\n\n\n### Step 1 - Stand up GitLab instance in AWS\n\n\n- Open [GitLab Ultimate](https://aws.amazon.com/marketplace/pp/B07SJ817DX) in AWS Marketplace.\n- Click on **Continue to Subscribe**\n\n![aws-1](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-1.png)\n\n- Sign in with your IAM user.\n\n![aws-2](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-2.png)\n\n- Click on **Continue to Configuration**.\n\n![aws-3](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-3.png)\n\n- Leave the default value for **Delivery Method**, select the latest version in **Software Version**, select your **Region**, click **Continue to Launch**.\n\n![aws-4](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-4.png)\n\n- In Launch this software page, scroll down.\n\n![aws-5](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-5.png)\n\n- Under **Security Group Settings** click **Create New Based On Seller Settings** .\n\n![aws-6](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-6.png)\n\n- Name your security group, add a description, and save it.\n\n![aws-7](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-7.png)\n\n- Select **Key Pair**. If you don't have key pair, create one. Leave other fields in this page with default values.  Click **Launch**.\n\n![aws-8](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-8.png)\n\n- You will get Congratulations message confirming you launched the machine successfully. In this message click on **EC2 Console** link.\n\n![aws-9](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-9.png)\n\n- Click on your instance ID link.\n\n![aws-10](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-10.png)\n\nThe provisioning takes a few minutes. 
Please wait before you start the next step.\n\n- Click \"Open address\" in order to open GitLab UI.\n\n Copy the **private** or **public** IP to your browser, depending on your **VPC configuration**.\n\n\n![aws-10_5](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-10_5.png)\n\n- It takes a few minutes to start the server; you may see this error. This is OK; wait 1 minute and refresh the page.\n\n![aws-11](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-11.png)\n\n- You should now be able to access the GitLab login page; Username is **root**, password is your **instance ID**, click **Sign in**.\n\n![aws-13](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-13.png)\n\n## Congratulations! You managed to start a GitLab instance and sign in to it.\n\n![aws-14](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-14.png)\n\n\n\n\n### Step 2 - Install Runner and Docker Engine\n\n\nRunner machines are the build agents that run the CI/CD jobs.\n\nRequirements:\n\n- Jobs run inside Docker images; therefore, the runner machine requires the Docker engine.\n\n\n### Connect to the machine with the **AWS console - Connect**\n\nIn order to set up the Runners and Docker engine, we need to connect to the GitLab machine we are running. This can be done via **SSH** from any command line, or directly via the **AWS Console**, depending on how your **VPC** is set. 
In our example we will use the **AWS console - Connect** feature to SSH into the machines.\n\n**WARNING: It is not a recommended best practice to install Runners on the same machine where the server is installed for security and performance reasons, but only for the sake of simplicity, in this blog we will install it on the same machine.**\n\n  - Go to your Instance summary, and click **Connect** in order to open the console.\n\n  ![runner-1](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-1.png)\n\n  - Click Connect again.\n\n  ![runner-2](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-2.png)\n\n\n### Install Docker engine\n\n  - Install Docker by running this command `curl -fsSL https://get.docker.com -o get-docker.sh\n   sudo sh get-docker.sh`\n\n\n### Set up Runners\n\n  - Download the binaries for Linux x86 `sudo curl -L --output /usr/local/bin/gitlab-runner \"https://gitlab-runner-downloads.s3.amazonaws.com/latest/binaries/gitlab-runner-linux-386\"`\n  - Give it permissions to execute: `sudo chmod +x /usr/local/bin/gitlab-runner`\n  - Create a GitLab CI user: `sudo useradd --comment 'GitLab Runner' --create-home gitlab-runner --shell /bin/bash`\n  - Install and run as a service: `sudo gitlab-runner install --user=gitlab-runner --working-directory=/home/gitlab-runner\nsudo gitlab-runner start`\n\n\n### Register the Runner\n\n\n  - Run this command: `sudo gitlab-runner register`.\n  - You will be prompted to enter the URL.\n  - Open your GitLab instance, under CI/CD settings:\n    - Click Settings, CI/CD.\n\n      ![runner-2](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-3.png)\n\n    - Expand **Runners**.\n\n      ![runner-4](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-4.png)\n\n    - Copy the URL to the clipboard under specific runner.\n\n    ![runner-5](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-5.png)\n\n  - Paste 
the URL in the console.\n  - Enter.\n  - You will be prompted to enter the registration token; copy it from the Runner settings.\n\n![runner-5](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-6.png)\n\n  - Paste it in the console.\n  - Enter a description for the runner: type **GitLab workshop**.\n  - Add a tag to this runner, for example type **Linux**.\n  - Enter executor, type **docker**.\n  - Enter the default Docker image, type **ruby:2.6**.\n  - You will get a message starting with **Runner registered successfully. Feel free to start it...**\n  - Refresh the Runner settings page in GitLab and you will see your runner under **Available specific runners**.\n  - Click edit.\n\n  ![runner-7.png](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-7.png)\n\n  - Check the **Indicates whether this runner can pick jobs without tags** option, and click **Save changes**.\n\n  ![runner-7.png](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-8.png)\n\n\n## Well done!! You successfully installed and registered GitLab Runner. 
Now you are ready to create a project and run your first CI/CD pipeline.\n\nIn my next blog, I will show you how to create a project, configure the CI/CD, change your application code, and run a CI/CD pipeline.\n",[749,9],{"slug":5315,"featured":6,"template":686},"how-to-stand-up-gitlab-in-awsmp","content:en-us:blog:how-to-stand-up-gitlab-in-awsmp.yml","How To Stand Up Gitlab In Awsmp","en-us/blog/how-to-stand-up-gitlab-in-awsmp.yml","en-us/blog/how-to-stand-up-gitlab-in-awsmp",{"_path":5321,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5322,"content":5328,"config":5333,"_id":5335,"_type":14,"title":5336,"_source":16,"_file":5337,"_stem":5338,"_extension":19},"/en-us/blog/how-to-start-a-great-oss-project",{"title":5323,"description":5324,"ogTitle":5323,"ogDescription":5324,"noIndex":6,"ogImage":5325,"ogUrl":5326,"ogSiteName":670,"ogType":671,"canonicalUrls":5326,"schema":5327},"How to start a great OSS project","In a modern DevOps world it's never been more critical to embrace open source. Here's everything you need to know to get started.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679362/Blog/Hero%20Images/contribute-open-source-jobs.jpg","https://about.gitlab.com/blog/how-to-start-a-great-oss-project","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to start a great OSS project\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Vanbuskirk\"}],\n        \"datePublished\": \"2022-10-18\",\n      }",{"title":5323,"description":5324,"authors":5329,"heroImage":5325,"date":5330,"body":5331,"category":1318,"tags":5332},[5134],"2022-10-18","\nIf you spend any time coding, you've probably considered starting an OSS project at some point. Of course, the natural temptation is to immediately sit down and start writing code. 
That's a great approach that many projects have started from, but what about when it's time to let others contribute?\n\nAn OSS project is as much about community as it is code, and the key to building a good project is providing an inviting, productive place for that community to work and create. How can new contributors be onboarded smoothly? What kind of maintenance and automation will allow the project to scale beyond the scope of its original creator's time and resources? This article hopes to answer a few of these questions and provide first-time project maintainers with a solid foundation for launching a great OSS project.\n\n## Create a great README.md\n\nA README file is the \"entry point\" to an OSS project. Most distributed version control software hosting platforms like GitLab make the README file the first thing a visitor sees when viewing the repo. A good README manages to convey important information about a project while focusing on ease of navigation and reading and grabs the attention of potential contributors and users.\n\nTo start, maintainers should familiarize themselves with [Markdown](https://www.markdownguide.org), the markup language used for most OSS project documentation files like README. Markdown is a simple, elegant tool for crafting content and it's helpful to be aware of its features and capabilities.\n\nFor the README file itself, there are some things maintainers can include that will help drive productive participation and engagement.\n\n### Overview of the project\n\nA great way to draw attention to your project is to lead with a UI or CLI screenshot of the software. Even better: record some basic usage and convert it to a GIF using an OSS (of course!) tool like [Terminalizer](https://github.com/faressoft/terminalizer). 
The overview should also include the \"why\" of the project; it should be clear what problem or problems the project solves, and what drove the maintainer to create the project.\n\n### How to install and use it\n\nOSS project users can often become OSS project contributors; a well-run and well-documented project goes a long way towards bringing more contributors into the fold. Users should be presented with clear, concise, and most importantly correct instructions for installing and using the software contained in the project. Potential users and contributors are likely to be put off by confusing, complex, or non-functional installation instructions.\n\n### Links to documentation\n\nNot all project documentation does or even should fit inside the README file. Your project likely depends on one or more programming languages, as well as the many development tools, libraries, and modules in the language ecosystem. The README should serve as a project portal; linking to third-party documentation as needed, rather than as a comprehensive collection of all relevant documentation in one place.\n\n### Links to Code of Conduct\n\nThe global OSS project community is made up of a great many individuals, representing a rich, diverse spectrum of backgrounds and identities. With that in mind, an OSS project needs to provide a welcoming, inclusive Code of Conduct with firm and clear rules around expected behavior and decorum. One option is [Contributor Covenant](https://www.contributor-covenant.org/). A shared understanding of what defines good conduct is a pillar of a good community.\n\n### Links and instructions for reporting bugs or requesting features\n\nIf OSS project users become contributors, a great way to foster this transition is to make it easy to report bugs or request features. 
Ideally, this is where your README file links to your CONTRIBUTING.md file as well.\n\nA great example of an OSS project with an awesome README is [Leapp](https://github.com/Noovolari/leapp#readme); here’s another [example on GitLab](https://gitlab.com/CalcProgrammer1/OpenRGB/-/blob/master/README.md). This [Hacker News discussion](https://news.ycombinator.com/item?id=30106264) further demonstrates the power of the OSS project community in helping drive better engagement.\n\n## Creating a great CONTRIBUTING.md is important too\n\nThe CONTRIBUTING.md file represents another very important piece of documentation in an OSS project. Ideally, a CONTRIBUTING file should contain clear instructions for how individuals can get started contributing to your project. It's also important to be cognizant of first-time contributors to your project versus first-time contributors to OSS. For first-time OSS participants, it can be helpful to include links [like this](https://opensource.guide/how-to-contribute/).\n\nThe focus should be on technical detail; clear, concise instructions for how to clone, build, test, and commit are just some of what should be included. An adequate amount of detail and context is important, especially around what pre-requisite knowledge is expected and where it can be gained. The goal is to provide a deterministic path for contributors, with the end-state being a well-formed Merge Request. The [GitLab document](https://about.gitlab.com/community/contribute/) provides an excellent example.\n\nThe documentation for contributing should include:\n\n### An introductory message\n\nThis should be a warm and welcoming message that encourages individuals to participate, but also gives them the right foundation and context for creating successful and helpful commits.\n\n### How to set up a development environment\n\nDevelopment environments can be tricky to get right. Contributors may be working from a variety of different operating systems, IDEs, and hardware. 
Focus on making your project as environment agnostic as possible. Containerization tools like [Docker](https://www.docker.com) can help by isolating dependencies within the boundaries of a container environment. Ideally, as the project grows, you can take advantage of CI/CD automation to standardize things like linting and testing in a controlled environment or provide a one-click deployment option via something like [GitPod](https://www.gitpod.io).\n\n### How to run tests\n\nEarly in the project lifecycle, testing will probably be a minimal, non-comprehensive affair. Contributors will need to have clear guidance on setting up local development and testing to ensure their commits don't break existing functionality. Tests are another aspect of contributions that benefit heavily from automation.\n\n### Links to resources, including a style guide, the primary discussion medium, etc...\n\nContributors will almost always need to refer to additional resources to help them complete their work. The Contributing doc is a great place to link helpful and relevant documentation, including style guides, as well as third-party information. You should also highlight where the primary discussion medium for the project is hosted, which can be something like Slack, Discord, or within the repository itself.\n\n### Specific instructions on reporting bugs, and submitting changes/features\n\nBe specific and explicit with instructions for bugs, changes, and features. Providing this up-front reduces the amount of time that might be spent requesting basic formatting changes or additional information that's typically always needed on these topics.\n\n### Less experienced contributors? Suggest first-time issues\n\nContributing to OSS can be very intimidating for first-time contributors. It can be extremely helpful not just for your project, but for the entire OSS ecosystem to label issues that are ideal for first-time contributions. 
GitLab uses the [quick win](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&label_name%5B%5D=quick%20win&first_page_size=20) label to highlight such issues.\n\nSome criteria that make for good first issues:\n- User-facing documentation updates\n- Adding unit tests\n- Well-scoped bug fixes, with an obvious end-state or success metric\n- Writing good code comments. Python docstrings are great for defining class and method behavior and are read by a variety of tools.\n\n## Choose a good license\n\nAn often overlooked, but no less important, part of starting an OSS project is choosing a good license. The sometimes verbose legal language of OSS licenses, as well as the scare stories of them being applied inappropriately, can be intimidating to first-time contributors.\n\nFortunately, tools like [Choose a license](https://choosealicense.com/) are available, allowing maintainers to make an informed choice about which license model is the best fit for the project.\n\nThe MIT and Apache licenses are common choices for OSS projects, but each project and maintainer are unique. Something else to consider is that a lot of OSS contributors often work professionally as software engineers, and may be subject to rules that prohibit or limit OSS contributions based on intellectual property concerns.\n\n## Use templates to make OSS maintenance easier\n\nEarly in the life of your OSS project, there are likely to only be a few contributors. The inflow of pull requests, issues and feature work will generally be pretty manageable at this stage. The need for automation and well-defined processes won't be immediately obvious, but once the project scales it's very easy to feel overwhelmed without some structure in place.\n\nTemplates are a great way to help establish some formal processes for dealing with common workflows in OSS projects. 
For most version control platforms, templates are Markdown documents that allow maintainers to pre-define the format and structure of things like issues, pull requests, and merge requests. There are some good examples of [issue templates here](https://gitlab.com/gitlab-org/gitlab/-/tree/master/.gitlab/issue_templates), as well as [templates for merge requests](https://gitlab.com/gitlab-org/gitlab/-/tree/master/.gitlab/merge_request_templates).\n\nGiving contributors a clear picture of the required information up front saves a lot of time and headache, and avoids the dance of maintainers having to frequently ask follow-up questions on issues to get a clear picture of the actual technical problem at hand. Once your project hits a critical mass of participation, it's very important to have a good structure of templates in place to allow you, and eventually other maintainers to leverage their time.\n\nAnother easy win for ease of maintenance is committing a well-formed gitignore file that's relevant to the type of project and language choice. The [SCM Git docs](https://git-scm.com/docs/gitignore) provide great documentation.\n\n## Automate your OSS project\n\nLeveraging maintainer resources and time is the key to successfully growing an OSS project. Beyond templates, some platforms allow maintainers to automate significant portions of the building and deployment of their projects.\n\nOne piece of automation that should be familiar to anyone with experience in a DevOps environment is Continuous Integration/Continuous Delivery(CI/CD) pipelines. CI/CD tools enable engineers to define a repeatable workflow that can lint, analyze, test, and deploy code while providing fast feedback on the outcome of each step. For example: a project using Python could integrate [pyflakes](https://gitlab.com/dnsmichi/api-playground/-/blob/main/.gitlab-ci.yml) into its CI workflow, ensuring all contributions are tested with a common standard for linting and syntax. 
Even [Markdown code can be tested](https://gitlab.com/gitlab-de/playground/markdown-lint-challenge/-/blob/main/.gitlab-ci.yml) this way! If maintainers want to take this pattern even further, a tool like [MKDocs can be integrated into a CI/CD workflow](https://gitlab.com/dnsmichi/opsindev.news/-/blob/main/.gitlab-ci.yml) as well to automatically generate documentation for the project. For busy maintainers, automating the typically tedious process of writing and updating documentation is a huge win.\n\nWith automation deployed, status badges can be a great way to provide contributors with a holistic view of the state of things like test coverage, build status, CI/CD health, and the current release version. The status badges on [this project](https://gitlab.com/gitlab-de/use-cases/iac-tf-vuln-module) provide both users and contributors with an at-a-glance understanding of pipeline health, and the most current release version of the module.\n\nFor anyone thinking about starting a project or already maintaining an open source project, the [GitLab for Open Source](https://about.gitlab.com/solutions/open-source/) program provides maintainers access to Ultimate features for free, which includes many valuable Security features as well as additional CI minutes.\n\n## Great OSS projects aren't just code\n\nCode is of central importance to open source software. However, an OSS project is more than just code. It's a community of diverse individuals participating in a shared goal. To help achieve that goal, it's crucial to provide a well-maintained space for that community to participate.\n\nSaying thanks for every contribution, welcoming everyone, and encouraging them to stay with feedback can also help make the project an inviting space. 
Along the way, you'll find new maintainers, and friends as well.\n\n_GitLab developer evangelist Michael Friedrich made significant contributions to this post._\n",[9,682,1515],{"slug":5334,"featured":6,"template":686},"how-to-start-a-great-oss-project","content:en-us:blog:how-to-start-a-great-oss-project.yml","How To Start A Great Oss Project","en-us/blog/how-to-start-a-great-oss-project.yml","en-us/blog/how-to-start-a-great-oss-project",{"_path":5340,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5341,"content":5346,"config":5351,"_id":5353,"_type":14,"title":5354,"_source":16,"_file":5355,"_stem":5356,"_extension":19},"/en-us/blog/how-to-status-checks",{"title":5342,"description":5343,"ogTitle":5342,"ogDescription":5343,"noIndex":6,"ogImage":928,"ogUrl":5344,"ogSiteName":670,"ogType":671,"canonicalUrls":5344,"schema":5345},"How to use external status checks for merge requests","Want to integrate third-party systems and apps with GitLab merge requests? Here's everything you need to know.","https://about.gitlab.com/blog/how-to-status-checks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use external status checks for merge requests\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-10-04\",\n      }",{"title":5342,"description":5343,"authors":5347,"heroImage":928,"date":5348,"body":5349,"category":791,"tags":5350},[1727],"2021-10-04","\n\nThe [external status checks for merge requests capability](/releases/2021/07/22/gitlab-14-1-released/#external-status-checks-for-merge-requests) was recently introduced in GitLab and it allows the integration of third-party systems and applications with GitLab merge requests.\n\n## What are \"external status checks for merge requests\"?\n\nExternal status checks are API calls to systems or applications that sit outside GitLab. 
These API calls are invoked during merge requests, which display a widget with the status of each external check. With external status checks, you can integrate GitLab with third-party systems, e.g. Salesforce, PeopleSoft, Microsoft Dynamics, etc., that require manual approval for merge requests. This makes it easy to see that merge requests have met external requirements before being merged, adding an extra method to ensure compliance and audit requirements are met.\n\n## Steps to enable and use external status checks for merge requests\n\nIn this example, I have a sample project called **my-proj**, for which I'd like to add and exercise a single external status check, which will hypothetically do some kind of validation for the merge request.\n\n### Adding an external status check to your project\n\nExternal status checks are added to merge requests by heading to your project’s **Settings > General** and then expanding the **Merge requests** section. Towards the bottom of the **Merge requests** section, you will see an **Add status check** button, which you will need to click to display the **Add status check** pop-up dialog:\n\n\u003C!--\n![Add status check dialog](https://about.gitlab.com/images/blogimages/how-to-status-checks/1-add-status-check-dialog.png){: .shadow.small.center.wrap-text}\nAdd status check dialog with filled values\n{: .note.text-center}\n-->\n\n\u003Cimg src=\"/images/blogimages/how-to-status-checks/1-add-status-check-dialog.png\" width=\"50%\" height=\"50%\">\nAdd status check dialog with filled values\n{: .note.text-center}\n\nIn the dialog above, the external service name is being given the name *compliance-check*. 
The external API that will be called is:\n\n> https://tech-marketing-sandbox-cd-compvalidator.compliance.gitlabworkshops.io/validate\n\n> **NOTE:** the *validate* service above was [a simple Java service that I set up](https://gitlab.com/tech-marketing/sandbox/cd/compvalidator) ahead of time to mimic a third-party external service. It returned an HTTP 200 success message when invoked. In a real life scenario, this external API call would be a SaaS service or an on-premises ERP system, for example.\n\nThe API above is a call - invoked from any merge requests created under this project - to an external system that will run a compliance check and validate modifications to this application.\n\nAs the target branch, the default *Any branch* has been selected. Another option could have been the *main* branch.\n\nWhen you click the **Add status check** button, an entry will be created in the **Status checks** table, as shown below:\n\n![status check table](https://about.gitlab.com/images/blogimages/how-to-status-checks/2-status-checks-table.png){: .shadow.small.center.wrap-text}\nStatus checks table\n{: .note.text-center}\n\n### External status check in action\n\nTo exercise the external status check for merge requests, we need to create a merge request. But before that, let's create an issue.\n\n1. Create an issue by clicking on **Issues > List** from the left vertical navigation menu to get to the Issues screen.\n\n2. Then click on the **New Issue** button\n\n3. On the **New Issue** window:\n\n3.1. In the Title field, enter \"External status check demo\"\n\n3.2. In the Description field, enter \"Issue to demonstrate an external status check\"\n\n3.3. Click on **Assign to me** next to the **Assignees** field\n\n3.4. 
Click on the **Create issue** button at the bottom of the window\n\n\u003C!--\n![issue create window](https://about.gitlab.com/images/blogimages/how-to-status-checks/3-issue-create-window.png){: .shadow.small.center.wrap-text}\nCreating an issue\n{: .note.text-center}\n-->\n\n\u003Cimg src=\"/images/blogimages/how-to-status-checks/3-issue-create-window.png\" width=\"75%\" height=\"75%\">\nCreating an issue\n{: .note.text-center}\n\nOnce the issue is created, you will be in the detail issue window.\n\n4. Click on the **Create merge request** button on the right hand side of the detailed issue window.\n\n![create a merge request](https://about.gitlab.com/images/blogimages/how-to-status-checks/4-create-merge-req.png){: .shadow.small.center.wrap-text}\nCreating a merge request\n{: .note.text-center}\n\nOnce the merge request is created, you will be in the detail merge request window.\n\n5. Click on the **Open in Web IDE** button on the right hand side of the detailed merge request window:\n\n![open webIDE](https://about.gitlab.com/images/blogimages/how-to-status-checks/5-open-webide.png){: .shadow.small.center.wrap-text}\nOpening the Web IDE\n{: .note.text-center}\n\n6. Make a minor update to the application. In the sample project **my-proj**, I modified two files: DemoApplication.java and DemoApplicationTests.java.\n\n6.1. In the DemoApplication.java class, I added the word \"today\" to the string returned by a call to this class:\n\n![update DemoApp](https://about.gitlab.com/images/blogimages/how-to-status-checks/6-update-demoapp.png){: .shadow.small.center.wrap-text}\nMaking a simple update to DemoApplication.java\n{: .note.text-center}\n\n6.2. 
In the DemoApplicationTests.java class, which is a unit test for DemoApplication.java, I also added the word \"today\" to the string in the *assertThat()* invocation to match the value returned by a call to the DemoApplication.java class:\n\n![update DemoAppTests](https://about.gitlab.com/images/blogimages/how-to-status-checks/7-update-demoapptests.png){: .shadow.small.center.wrap-text}\nMaking a simple update to DemoApplicationTests.java\n{: .note.text-center}\n\n7. Click on the **Commit…** button at the bottom of the Web IDE window. And then ensure to select the feature branch for the merge request before clicking on the **Commit** button again:\n\n\u003C!--\n![committing to feature branch](https://about.gitlab.com/images/blogimages/how-to-status-checks/8-click-commit.png){: .shadow.small.center.wrap-text}\nCommitting to the feature branch\n{: .note.text-center}\n-->\n\n\u003Cimg src=\"/images/blogimages/how-to-status-checks/8-click-commit.png\" width=\"30%\" height=\"30%\">\nCommitting to the feature branch\n{: .note.text-center}\n\n8. Go back to the merge request detail window by clicking on the merge request number on the bottom margin of the window:\n\n\u003C!--\n![click on merge request link](https://about.gitlab.com/images/blogimages/how-to-status-checks/9-click-mr-at-bottom.png){: .shadow.small.center.wrap-text}\nClicking on merge request link at bottom of window\n{: .note.text-center}\n-->\n\n\u003Cimg src=\"/images/blogimages/how-to-status-checks/9-click-mr-at-bottom.png\" width=\"75%\" height=\"75%\">\nClicking on merge request link at bottom of window\n{: .note.text-center}\n\n9. On the detail merge request window, scroll down until you see a section titled **Status checks 1 pending**. This is the merge request widget that lists all external status checks associated with merge requests. 
Click on the **Expand** button on the right hand side of this section:\n\n![expanding status checks widget](https://about.gitlab.com/images/blogimages/how-to-status-checks/10-click-on-expand.png){: .shadow.small.center.wrap-text}\nExpanding the status checks widget in the merge request\n{: .note.text-center}\n\n10. In the expanded section, you will see an entry for the external status check you defined above, whose name is *compliance-check*. Notice that to the left of its name, there is a pause symbol indicating to the merge request stakeholders that the check is still in progress and has not communicated its approval to the merge request yet:\n\n![list of status checks](https://about.gitlab.com/images/blogimages/how-to-status-checks/11-status-checks-widget-expanded.png){: .shadow.small.center.wrap-text}\nList of external status checks\n{: .note.text-center}\n\n11. In a real life scenario, the pause symbol would change to a green checkmark when the external status check communicates to GitLab that the compliance validation is finished, i.e. the merge request has been approved by the external service:\n\n![status checks passed](https://about.gitlab.com/images/blogimages/how-to-status-checks/12-status-check-passed.png){: .shadow.small.center.wrap-text}\nStatus checks that have passed\n{: .note.text-center}\n\n### How does an external status check inform GitLab that it has approved the merge request\n\nUsing an external status check integrates GitLab merge requests to a home-grown or SaaS application, for example, by invoking an API of this external system. Once this external system does its compliance validation or check, then it needs to inform GitLab that it has approved the merge request. To do this, the external system API must make use of the [GitLab external status checks API](https://docs.gitlab.com/ee/api/status_checks.html) to communicate to GitLab that the MR is approved. This is a 2-step process:\n\n1. 
The first step is to get the ID of the external status check you need to approve. Here is an example of how to invoke the GitLab API to do this:\n\n> curl --request GET --header \"PRIVATE-TOKEN: \u003Creplace with your GitLab API token>\" \"https://gitlab.com/api/v4/projects/28933616/merge_requests/1/status_checks\"\n\nAn example of what the command above will return follows:\n\n> [{\"id\":86,\"name\":\"compliance-check\",\"external_url\":\"https://tech-marketing-sandbox-cd-compvalidator.compliance.gitlabworkshops.io/validate\",\"status\":\"pending\"}]\n\nThe example return value above shows that the ID of the external status check that we’d like to approve is 86.\n\n> **NOTE:** Although I'm showing an example of how to invoke the GitLab API above using the *curl* command, the idea is that your external system API call would carry out any checks and validation and then it would assemble this message in a REST HTTP call back to GitLab to communicate its approval of the merge request.\n\n2. Once you have the ID of the external status check, you can then approve it by using the GitLab API. Here’s an example:\n\n> curl --request POST --header \"PRIVATE-TOKEN:\u003Creplace with your GitLab API token>\" \"https://gitlab.com/api/v4/projects/28933616/merge_requests/1/status_check_responses?sha=\u003Creplace with SHA at HEAD of the source branch>&external_status_check_id=86\"\n\nExecuting the REST API call above will approve the external status check on the GitLab merge request.\n\n```\nNOTE: to obtain the \u003CSHA at HEAD of the source branch>, here’s an example of the command you’d need to execute:\n\n$ git ls-remote https://gitlab.com/tech-marketing/sandbox/cd/my-proj.git\n\nThe URL in the preceding line is the URL to the git project for your merge request. 
And here’s an example of the output of the preceding command:\n\nad1eeee497c99466797a1155f514d3c0c2f0cc45\tHEAD\n9e209c8d409a0867c1df4e0965aa675277176137\trefs/heads/1-external-status-check-demo\nad1eeee497c99466797a1155f514d3c0c2f0cc45\trefs/heads/master\n9e209c8d409a0867c1df4e0965aa675277176137\trefs/merge-requests/1/head\n```\n\nIn the output above, the SHA for the feature branch associated with the merge request is *9e209c8d409a0867c1df4e0965aa675277176137*\n\n## What we've learned\n\nGitLab recently introduced \"external status checks for merge requests,\" which are effectively API calls to systems/application that sit outside GitLab. As you could see, with external status checks for merge requests, we were able to integrate GitLab with a third-party system that required manual approval for a merge request, ensuring that your application updates meet compliance and audit requirements.\n\nFor a demo of this feature in action, watch the video below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/v4iY8qMvFLo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n",[231,683,9],{"slug":5352,"featured":6,"template":686},"how-to-status-checks","content:en-us:blog:how-to-status-checks.yml","How To Status Checks","en-us/blog/how-to-status-checks.yml","en-us/blog/how-to-status-checks",{"_path":5358,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5359,"content":5365,"config":5370,"_id":5372,"_type":14,"title":5373,"_source":16,"_file":5374,"_stem":5375,"_extension":19},"/en-us/blog/how-to-use-agent-based-gitops",{"title":5360,"description":5361,"ogTitle":5360,"ogDescription":5361,"noIndex":6,"ogImage":5362,"ogUrl":5363,"ogSiteName":670,"ogType":671,"canonicalUrls":5363,"schema":5364},"How to use a pull-based (agent-based) approach for GitOps","Learn how GitLab supports agent-based approach for 
GitOps","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682037/Blog/Hero%20Images/agent-based-gitops-cover-880x587.jpg","https://about.gitlab.com/blog/how-to-use-agent-based-gitops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use a pull-based (agent-based) approach for GitOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-06-23\",\n      }",{"title":5360,"description":5361,"authors":5366,"heroImage":5362,"date":5367,"body":5368,"category":791,"tags":5369},[1727],"2021-06-23","\n\nIn the previous post, titled [3 ways to approach GitOps](https://about.gitlab.com/blog/gitops-done-3-ways/), we discussed the many benefits and options that GitLab supports for fulfilling the [GitOps](/topics/gitops/) requirements of customers, whose IT environments are composed of heterogeneous technologies and infrastructures. This post is a 3-part series, in which we delve deeper into these options. In this first part, we cover the pull-based or agent-based approach.\n\n## About a pull-based or agent-based approach\n\nIn this approach, an agent is installed in your infrastructure components to pull changes whenever there is a drift from the desired configuration, which resides in GitLab. Although the infrastructure components could be anything from a physical server or router to a VM or a database, we will focus on a Kubernetes cluster in this section.\n\nIn the following example, the [reconciliation loop](https://about.gitlab.com/solutions/gitops/) is made up of two components: an agent running on the Kubernetes cluster and a server-side service running on the GitLab instance. One of the benefits of this approach is that you don’t have to expose your Kubernetes clusters outside your firewall. 
Another benefit is its distributed architecture, in that agents running on the infrastructure components are in charge of correcting any drift relieving the server-side from resource consumption. This approach requires the maintenance and installation of agents on all infrastructure components you want to be part of your GitOps flows.\n\n### GitLab Agent for Kubernetes as a pull-based approach\n\n[Introduced](https://about.gitlab.com/releases/2020/09/22/gitlab-13-4-released/#introducing-the-gitlab-kubernetes-agent) as part of GitLab 13.4, the GitLab Agent for Kubernetes runs on your Kubernetes cluster and pulls changes in your infrastructure configuration from GitLab to your cluster keeping your infrastructure configuration from drifting away from its desired state.\n\nGitLab Agent for Kubernetes (the feature) is currently implemented as two components ([architecture doc](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/blob/master/doc/architecture.md)):\n\n- GitLab Agent for Kubernetes (agentk program): The component that users install into their cluster.\n\n- GitLab Agent for Kubernetes Server (kas program): The server-side counterpart, that runs \"next to GitLab.\"\n\nThe high-level architecture of the GitLab Agent for Kubernetes is depicted below:\n\n![GitLab K8s agent high-level architecture](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/0-K8s-agent-arch.png){: .shadow.small.center.wrap-text}\nGitLab K8s agent high-level architecture.\n{: .note.text-center}\n\nThe **agentk** is installed on your Kubernetes cluster and it is the component that applies updates to the infrastructure. The **kas** is installed on the GitLab instance and it manages the authentication and authorization between **agentk** instances and GitLab, monitors projects for any changes and gathers latest project manifests to send to **agentk** instances.\n\n> **NOTE:** on Gitlab.com, the **kas** is installed and maintained by GitLab. 
On self-managed instances, the customer needs to install it.\n\nIn the following self-managed instance example, we go through a GitOps flow that leverages the pull-based approach to GitOps.  After the **agentk** component has already been installed on the K8s cluster, the user proceeds to log on to the GitLab instance and creates a project called **gitops-project**:\n\n![Creating the gitops-project](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/1-create-gitops-proj.png){: .shadow.medium.center.wrap-text}\nCreating the gitops-project.\n{: .note.text-center}\n\nThe project **gitops-project** will be the one that will be monitored or observed by the **kas** component. Then, under **gitops-project**, the user creates an empty manifest file called **manifest.yaml**. This is the manifest file that will contain the Infrastructure as Code configuration for this project:\n\n![Manifest file created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/2-manifest-file-created.png){: .shadow.medium.center.wrap-text}\nManifest file created.\n{: .note.text-center}\n\nNext, the user creates a Kubernetes agent configuration repository project, **kubernetes-agent**, which will contain information pertinent to the **kas** component.\n\n![Creating the kubernetes-agent project](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/3-create-K8s-agent-proj.png){: .shadow.medium.center.wrap-text}\nCreating the kubernetes-agent project.\n{: .note.text-center}\n\nWithin the **kubernetes-agent** project, the user creates a subdirectory **.gitlab/agents/agent1**, where **agent1** is the name given to this specific agent:\n\n![Config.yaml file created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/4-config-yaml-created.png){: .shadow.medium.center.wrap-text}\nConfig.yaml file created.\n{: .note.text-center}\n\nNotice that in the screenshot above, the project to be observed, **gitops-project**, was 
created in an earlier step.\n\nThe next step consists of the creation of a GitLab Rails Agent record to associate it with the Kubernetes agent configuration repository project. In the following screenshot, you see the commands that the user enters to first identify the task-runner pod, to log into it, to enter the Rails Console, and finally to create the agent record and a token for it:\n\n![Agent record created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/5-agent-record-created.png){: .shadow.medium.center.wrap-text}\nAgent record created.\n{: .note.text-center}\n\nIn the above screenshot, the last command uses the agent token to create a secret on the K8s cluster for secured communication between the **agentk** and the **kas** components.\n\nThe **agentk** pod creation on the K8s cluster is the next step. For this, the user creates a **resources.yml** file, in which the secured communication protocol between the **agentk** and the **kas** is specified as shown in the following snippet:\n\n![Websockets line](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/6-wss-line-in-resources-yml.png){: .shadow.medium.center.wrap-text}\nWebSockets communication specified in the resources.yml file.\n{: .note.text-center}\n\nIn the above snippet, secured WebSockets protocol is being used. 
GitLab also supports gRPC.\n\nOnce the **resources.yml** file is updated with the corresponding GitLab instance information, the user proceeds to create the pod:\n\n![Agentk pod created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/7-agentk-created.png){: .shadow.medium.center.wrap-text}\nCreation of the **agentk** pod.\n{: .note.text-center}\n\nIn the screenshot above, you can see the execution of the **kubectl apply** that created the **agentk** pod in the K8s cluster.\n\nNow that the **agentk** and **kas** have been installed and are communicating securely with each other, the user can start performing some GitOps flows. Although the [GitLab Flow](https://about.gitlab.com/topics/version-control/what-is-gitlab-flow/) is the recommended approach for DevOps, it is also applicable to GitOps flows; after all GitOps is all about applying the goodness of DevOps to managing [Infrastructure as Code](/topics/gitops/infrastructure-as-code/).\n\nThis means that the user should create an issue and then a merge request, in which all stakeholders can collaborate towards the resolution of the issue. For the sake of brevity, in this technical blog post, we will skip all these steps and show you how updates to the Infrastructure as Code configuration files are automatically applied to the infrastructure components.\n\nNOTE: Fostering Collaboration is a great benefit of GitOps. For more information on this, check out this short [tech video](https://youtu.be/onFpj_wvbLM).\n\nFor example, the user can start making updates to the **manifest.yaml** file under the **gitops-project**, which is being observed by the kas component. 
Here you can see the user has pasted content into this file:\n\n![Manifest.yaml file updated](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/8-manifest-yaml-updated.png){: .shadow.medium.center.wrap-text}\nManifest.yaml file updated.\n{: .note.text-center}\n\nRemember that this file had been created as an empty file. As soon as the user commits the changes displayed above, the **kas** component will detect the changes and communicate these to the **agentk** component, which is running on the K8s cluster. The **agentk** will immediately apply these changes to the infrastructure. In this example, the user has updated the infrastructure configuration file to have 2 instances of an nginx. As shown in the screenshot below, the **agentk** has applied these updates by the instantiation of 2 nginx pods in the K8s cluster:\n\n![Two nginx pods up and running](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/9-two-nginx-running.png){: .shadow.medium.center.wrap-text}\nGitOps flow instantiates two nginx pods.\n{: .note.text-center}\n\nIf the user were to change the **manifest.yaml** file one more time and increment the replicas of the nginx pod to 3:\n\n![Manifest.yaml file updated with 3 nginx](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/10-manifest-yaml-updated-again.png){: .shadow.medium.center.wrap-text}\nManifest.yaml file updated with 3 nginx instances.\n{: .note.text-center}\n\nAgain, as soon as the commit takes place, the **kas** component detects the update and communicates this to the **agentk** component, which in turn, spins up a third nginx pod in the K8s cluster:\n\n![Three nginx pods up and running](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/11-three-nginx-running.png){: .shadow.medium.center.wrap-text}\nGitOps flow instantiates a third nginx pod.\n{: .note.text-center}\n\nLastly, the user can check the log files of the different components running on 
GKE, in this example. In the following screenshot, the user can see the **kas** component running on the GitLab instance:\n\n![kas running on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/12-kas-on-GKE.png){: .shadow.medium.center.wrap-text}\nThe **kas** component running on GKE.\n{: .note.text-center}\n\nAnd then the user can drill down into the log of the **kas** component, and see how it is detecting commits on the project it is observing:\n\n![kas log on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/13-kas-log-on-GKE.png){: .shadow.medium.center.wrap-text}\nThe **kas** log output on GKE.\n{: .note.text-center}\n\nLikewise, the user can navigate to the **agentk** component of the K8s cluster:\n\n![agentk running on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/14-agentk-on-GitLab.png){: .shadow.medium.center.wrap-text}\nThe **agentk** component running on GKE.\n{: .note.text-center}\n\nAnd, again drill down to its log to see, how the **agentk** component runs synchronizations with the **kas** component:\n\n![agentk log on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/15-agentk-log-top-on-GitLab.png){: .shadow.medium.center.wrap-text}\nThe **agentk** log output on GKE.\n{: .note.text-center}\n\nIn the following screenshot, the user sees the log statements indicating that the **agentk** is instantiating a third instance of an nginx pod:\n\n![agentk instantiating a third nginx pod](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/16-agentk-log-synced-on-GitLab.png){: .shadow.medium.center.wrap-text}\nThe **agentk** instantiating a third nginx pod.\n{: .note.text-center}\n\nThe above sections described an example of the setup needed to install and run the GitLab Agent for Kubernetes as well as how projects are monitored and synchronized from GitLab to a running K8s cluster.\n\n## Conclusion\n\nWe have gone over the 
setup and use of the Agent, which is an integral part of our pull-based or agent-based approach to GitOps. We also covered a GitOps flow that leveraged this agent-based approach, which is a good choice for Kubernetes shops that need to keep their clusters secured and behind their firewall. This approach comes with its drawbacks in that you need to maintain the agents, which also consume the resources of your infrastructure components. In part two of this series, we will discuss the push-based or agentless approach to GitOps.\n\nCover image by [Vincent Ledvina](https://unsplash.com/@vincentledvina?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/grand-tetons?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[534,916,976,9,1731],{"slug":5371,"featured":6,"template":686},"how-to-use-agent-based-gitops","content:en-us:blog:how-to-use-agent-based-gitops.yml","How To Use Agent Based Gitops","en-us/blog/how-to-use-agent-based-gitops.yml","en-us/blog/how-to-use-agent-based-gitops",{"_path":5377,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5378,"content":5384,"config":5389,"_id":5391,"_type":14,"title":5392,"_source":16,"_file":5393,"_stem":5394,"_extension":19},"/en-us/blog/how-tomorrows-tech-affects-sw-dev",{"title":5379,"description":5380,"ogTitle":5379,"ogDescription":5380,"noIndex":6,"ogImage":5381,"ogUrl":5382,"ogSiteName":670,"ogType":671,"canonicalUrls":5382,"schema":5383},"What devs need to know about tomorrow’s tech today","From 5G to edge computing, microservices and more, cutting-edge technologies will be mainstream soon. 
We asked more than a dozen DevOps practitioners and analysts which technologies developers need to start to understand today.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681675/Blog/Hero%20Images/future-of-software-what-developers-need-to-know.png","https://about.gitlab.com/blog/how-tomorrows-tech-affects-sw-dev","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What devs need to know about tomorrow’s tech today\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-10-21\",\n      }",{"title":5379,"description":5380,"authors":5385,"heroImage":5381,"date":5386,"body":5387,"category":679,"tags":5388},[851],"2020-10-21","\n\n_This is part two of our four-part series on the future of software development. [Part one](/blog/software-developer-changing-role/) examines how the software developer role is changing. Part three looks at [the role artificial intelligence (AI) will play in software development](/blog/ai-in-software-development/), and part four tackles [how to future-proof your developer career](/blog/future-proof-your-developer-career/)._\n\nIf it feels like we’ve been talking about future tech like 5G and edge computing forever, we have. But they’re getting closer to reality which means they should be on a developer’s radar. We asked 14 DevOps practitioners, analysts and GitLab experts which technologies are most likely to have an impact on software development in the next three to five years. Here’s what they said.\n\n## Edge computing comes of age\n\nThe fast-growing Internet of Things (IoT) market – worth $212 billion in 2019 and projected to hit 1.6 trillion in 2025 [according to market research firm Statista](https://www.statista.com/statistics/976313/global-iot-market-size/) – means edge computing may be coming to your DevOps team sooner than you think. 
Edge computing will challenge developers to literally put processing power within the application (on the “edge,” in other words) rather than having to reach out to the cloud for computations.\n\nToday’s edge computing is largely confined to telecom companies, says [Carlos Eduardo Arango Gutierrez](https://www.linkedin.com/in/eduardo-arango/?originalSubdomain=co), a software engineer at Red Hat (and a [GitLab Hero](/community/heroes/)), but in three to five years he sees front end developers needing to get a handle on this. “Part of my work at RedHat now is a lot of IoT and edge computing and I think every Kubernetes developer today is going to need to be thinking about it,” he says. “Developers are going to need to be thinking about networking but also about new types of routers and hardware architectures to support this.”\n\n## 5G is happening\n\nDespite the immense hype, a 5G wireless network rollout is underway around the world (here’s [an interactive map](https://www.speedtest.net/ookla-5g-map)). Statista predicts between [20 and 50 million 5G connections](https://hackernoon.com/top-10-software-development-trends-for-2020-you-need-to-know-as293690) as soon as the end of next year. Even if that forecast is optimistic, 5G will shortly upend mobile application use as we know it, and thus mobile application development. Dramatically faster download and upload times will give developers the chance to create more-feature-rich applications with better user experiences including potentially both [augmented](https://www.fi.edu/what-is-augmented-reality) and [virtual reality](https://www.wired.com/story/wired-guide-to-virtual-reality/).\n\n## Really, it’s about networking\n\nThat’s all a long way of saying that these cutting edge technologies are going to require developers to understand how to tie them neatly together. “In the future it doesn’t matter if you’re going to be good at the front end and know languages like Go or Java,” Carlos says. 
“You’re going to need to understand everything about networking. That’s critical to the future.”\n\n## Hardware becomes a factor\n\nSoftware developers tend to take hardware for granted, and why not? Today one phone or laptop is very much like the other but in a few years that will no longer be true. “As the speed of connectivity continues to evolve and as we hit certain thresholds we need to think about how we design solutions to take advantage of that,” says [Rafael Garcia](https://www.linkedin.com/in/jrafaelgarcia/), director of digital services at insurance conglomerate Aflac. “When storage became cheap it changed how you designed solutions and now with connectivity and broadband you don’t have to be worried about size anymore,” he says.\n\nSize is one consideration but there are many others, Carlos adds. Developers must move past the “if it works on a laptop it works everywhere” model and realize the production clusters and the distributed systems will have entirely different requirements for everything from design to security. “In the future, software developers need to understand the world is not your laptop,” he says.\n\n## Code (or secrets), heal thyself\n\nThe idea of self-healing code is something every DevOps team can embrace and it’s something GitLab CEO [Sid Sijbrandij](/company/team/#sytses) sees as a viable possibility. As an early example of this Sid points to [Kubernetes custom resource definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) because they automatically know the state they should be in. “Viewed through a different lens it’s the same thing in technologies like [Vault](https://www.vaultproject.io),” he explains. “Instead of secrets in a company system lasting for years or months it has dynamic secrets that continually refresh. 
It’s self-healing for secrets.”\n\n## Microservices go mainstream\n\nYour DevOps team may not have jumped on the [microservices](/topics/microservices/) bandwagon yet – in our 2020 survey only 26% of respondents fully use them – but Sid says they’re key to the future. It will also be important to know how to manage them, he says. “The interactions between services are going to be important particularly when it comes to distributed systems. We’re going to need technology for tracing and troubleshooting services.”\n\n_Why isn’t AI on this list? It’s so critical to the future it will be covered in part three of this series._\n",[9,1477,749],{"slug":5390,"featured":6,"template":686},"how-tomorrows-tech-affects-sw-dev","content:en-us:blog:how-tomorrows-tech-affects-sw-dev.yml","How Tomorrows Tech Affects Sw Dev","en-us/blog/how-tomorrows-tech-affects-sw-dev.yml","en-us/blog/how-tomorrows-tech-affects-sw-dev",{"_path":5396,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5397,"content":5403,"config":5409,"_id":5411,"_type":14,"title":5412,"_source":16,"_file":5413,"_stem":5414,"_extension":19},"/en-us/blog/how-we-built-a-stack-overflow-community-questions-analyzer-and-you-can-too",{"title":5398,"description":5399,"ogTitle":5398,"ogDescription":5399,"noIndex":6,"ogImage":5400,"ogUrl":5401,"ogSiteName":670,"ogType":671,"canonicalUrls":5401,"schema":5402},"How We Built a Stack Overflow Community Questions Analyzer","We wanted to better understand what Stack Overflow GitLab Community members wanted to know, so we automated a way to keep track of it all. 
Here's a step-by-step look at how we did it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667552/Blog/Hero%20Images/gitlabonstackoverflow.png","https://about.gitlab.com/blog/how-we-built-a-stack-overflow-community-questions-analyzer-and-you-can-too","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we built a Stack Overflow Community questions analyzer (and you can too)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2022-04-28\",\n      }",{"title":5404,"description":5399,"authors":5405,"heroImage":5400,"date":5406,"body":5407,"category":791,"tags":5408},"How we built a Stack Overflow Community questions analyzer (and you can too)",[2141],"2022-04-28","\nBeing part of the GitLab collective is an opportunity to learn first hand about the challenges the community using the DevOps Platform is facing. As a [Collective Member](https://stackoverflow.com/collectives/gitlab) logging between 2-3 times a week in StackOverflow  reading the questions and discussion posted about GitLab and manually sorting them by 'Recent Activity', 'Trending' and using Dates, I asked myself:  how can we leverage this  wealth of data and discover feedback, while finding  the most frequent topics where the community has questions? \n\nThis would be an opportunity to get a quick overview of topics where the community regularly needs help; this would also make it easier for us to create relevant content for them.  Manually sorting and extracting the text of each question wouldn’t be sustainable, so creating an automated way would be the most efficient way to proceed.\n\n## Experimenting with data-oriented content creation\n\nFinding out what the community is working on, and what they need help with while using GitLab, can help us to create better educational content that could expand their understanding of GitLab. 
To achieve this goal, the solution I created  after a few iterations is depicted below:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/fontes.png)\n\nWhere the Bill Of Materials consists mainly of:\n\n- GitLab DevOps Platform\n- Stackoverflow API\n- Kubernetes Cluster\n- Open Source Python libraries:\n- scikit-learn (TF-IDF)\n- Streamlit (front-end)\n- Spacy                 \n\nI leveraged the GitLab DevOps Platform to organize the projects using groups:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/organize.png)\n\nThe Loader project pulls questions about GitLab from the StackOverflow API, pre-processes the text and makes it usable for a second project: a Visualizer to create customized dashboards. \n\nThe automated process executed using the DevOps Platform is outlined below: \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/automatedprocess.png)\n\n- Pull data from [StackOverflow API](https://api.stackexchange.com/docs)\n- Preprocess the response extracting relevant fields from returned JSON\n- Build a corpus and calculate TF-IDF\n- Scan for security vulnerabilities\n- Review Application and display its resulting dashboards using [Streamlit](https://streamlit.io/)\n- Deploy the built application to a Kubernetes cluster\n\nLoader and Visualizer projects have their own codebase and pipelines, which is helpful if different teams need to work separately on them. However, one project can require the other, which raises the need for  cross-project  automation. \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/pipeline.png)\n\nThis scenario means a [multi-project pipeline](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) is useful to automate the whole process. 
The multi-project pipeline enables use cases such as:\n\n- As an NLP Developer I want to work on the NLP Pipeline in the Loader Project and automatically trigger the creation of a new visualization \n- As a Streamlit Developer I want to work independently in the buttons and data visualization without touching any NLP Pipeline backend  \n\nThe outlined process above is automatically run defining the steps in a [multi-project pipeline](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) sharing artifact:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/demo1-gif.gif)\n\n## Finding the most frequently occurring words\n\nThe Feature Engineering step will help me to analyze the text in the whole dataset of GitLab questions. Using a simple yet powerful technique – TF-IDF – we aim to find the most relevant terms utilized by the community. By using this technique in the pipeline execution,  I represent words in numerical values and later rank them in order of importance.  This approach serves as a baseline for further improvements. More detail about this algorithm can be found [here](https://en.wikipedia.org/wiki/Tf%E2%80%93idf).\n\n## Did we achieve any success?\n\nOne run of the multi-pipeline in our solution results in dashboards such as this one:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/demo2-gif.gif)\n\nAs an end-user of these dashboards I can immediately conclude that the main source of questions are around GitLab CI, pipelines and usage of Docker images. Not bad for a first run!  Having the data processed enables us to ask more questions and use data to answer it, such as, what are the questions from the highest [StackOverflow reputation](https://stackoverflow.com/help/whats-reputation) users ? 
\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/questions.png)\n\nCould these questions be inspiration for tutorials for the most advanced users, or the implementation of a new feature? \n\nBecause everyone can contribute, let's take a look at the users who just started gaining their StackOverflow reputation:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/contributors.png)\n\nThe question about access and reading/writing permissions in Portuguese is interesting. It makes me wonder about content localization and GitLab meetups in Portuguese-speaking countries. Not surprisingly, there were also questions about GitLab CI, which the text processing and ranking found most relevant in the corpus. \n\nDid we achieve any success? Yes, using a baseline technique such as TF-IDF sped up by DevOps practices allowed us to find out relevant terms and help us to understand where the majority of the community needs help in their DevOps journey. I have automated many steps that will allow me to focus on data exploration and possible implementation of more complex NLP Techniques rather than infrastructure allocation or manual input of commands and tests.\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/demo-reduced.gif)\n\nAs a Technical Marketing Manager, I want to create content that is relevant to enable or inspire the community to succeed. \n\nA personal takeaway: Educating about the latest GitLab DevOps platform capabilities and the problems they solve is important and so is keeping an eye on the content that might not be related to a new feature but is needed right now.\n\nAre we done? 
No, quoting Da Vinci's altered quote about [Art](https://www.artshub.com.au/news/features/art-is-never-finished-only-abandoned-262096-2370305/#:~:text=Lottie%20Consalvo%20in%20her%20studio,writers%2C%20and%20creatives%20would%20recognise) but with software: \"Software is never finished, only abandoned.\"\n\nThere is room for improvement and adding capabilities to this project. We continue iterating, listening to the community, and we encourage you to clone these projects, try it yourself, and adjust it with the topics that make sense to you. Create a merge request to improve the codebase and suggest new dashboards ideas!\n\nExplore the [group of projects](https://gitlab.com/tech-marketing/ad-fontes) and take a look at the [dashboard](https://bit.ly/3jeTFQp).\n",[9,267,1515],{"slug":5410,"featured":6,"template":686},"how-we-built-a-stack-overflow-community-questions-analyzer-and-you-can-too","content:en-us:blog:how-we-built-a-stack-overflow-community-questions-analyzer-and-you-can-too.yml","How We Built A Stack Overflow Community Questions Analyzer And You Can Too","en-us/blog/how-we-built-a-stack-overflow-community-questions-analyzer-and-you-can-too.yml","en-us/blog/how-we-built-a-stack-overflow-community-questions-analyzer-and-you-can-too",{"_path":5416,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5417,"content":5423,"config":5428,"_id":5430,"_type":14,"title":5431,"_source":16,"_file":5432,"_stem":5433,"_extension":19},"/en-us/blog/how-we-built-status-page-mvc",{"title":5418,"description":5419,"ogTitle":5418,"ogDescription":5419,"noIndex":6,"ogImage":5420,"ogUrl":5421,"ogSiteName":670,"ogType":671,"canonicalUrls":5421,"schema":5422},"How we built Status Page","Get the scoop on the process behind engineering and troubleshooting the implementation of the Status Page","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681270/Blog/Hero%20Images/red-green-chilli.jpg","https://about.gitlab.com/blog/how-we-built-status-page-mvc","\n                   
     {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we built Status Page\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Olena Horal-Koretska\"}],\n        \"datePublished\": \"2020-04-29\",\n      }",{"title":5418,"description":5419,"authors":5424,"heroImage":5420,"date":3429,"body":5426,"category":791,"tags":5427},[5425],"Olena Horal-Koretska","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-05-08.\n{: .alert .alert-info .note}\n\nThe [Status Page](https://docs.gitlab.com/ee/operations/incident_management/status_page.html) is a new tool for communicating incident status and maintenance times, and is available to [GitLab Ultimate users](/pricing/ultimate/) (though the frontend is available to anyone). We are building the Status Page at GitLab to provide the best incident management experience both for our internal team and our customers.\n\n###  Current Status update approach\n\nIncident handling in GitLab happens inside the issue in a dedicated public project. The team discusses and posts updates in the issue. Public updates are manually published by engineer-on-call to [status.gitlab.com](https://status.gitlab.com/) every 15 minutes. But this setup is not ideal - responders lose precious time during fire-fight by switching tools and duplicating information. Also having public project for incident management means:\n\n1. Massive load on your instance in the \"hard times\"\n2. Higher monetary cost\n3. No access to status updates if your GitLab instance is down\n4. Sensitive information that comes up in a discussion is public and may cause vulnerability exploit while it is being fixed\n\n### Requirements\n\nOur first customer was the GitLab team. We [dogfood everything](/handbook/engineering/development/principles/#dogfooding), and the Status Page was no exception. 
So requirements were built based on the needs of our internal team:\n\n1. **No tool switching for incident updates:** People that handle incidents have enough responsibilities with fixing incidents, so we should spare them the countless pings about the incident. These pings might be about what happened, the status of the incident, and how the incident is progressing. Granted, there are some users who want to receive immediate updates on the incident. Incident status should be updated in one place both for peer-problem-solvers and the public.\n\n1. **Ability to control level of visibility: Determine which updates are published and which are not**: When you have a problem in your product you do not necessarily want to shout it out: \"Hey, you malicious hacker, we've got a problem - go exploit it.\" Instead, you want your team to address the vulnerability calmly and in a timely manner. Balancing the need for sending assuasive messages to the public without distracting the fire-fight team can be achieved when you have control over the degree of visibility for the incident.\n\n1. **Display all types of data from GitLab incident description and comments on Status Page.** As incidents are handled in GitLab issues, there are a few options for how the data is represented to communicate the problem and/or solution, including images, embedded charts, etc. This rich data must be available in public updates.\n\n## Building the Status Page\n\nWe updated the design of the Status Page to address all of the concerns described in the previous section. Before we started building the Status Page, we led a [Spike exercise](/handbook/engineering/development/ops/monitor/#spike) because we weren't entirely sure which approach to take for implementation.\n\nOur initial plan was to leverage one of the many open-source solutions for implementing the Status Page, but none of them could really satisfy all of our requirements. 
So instead we decided to go ahead and build our own implementation.\n\n#### Backend and data scraping\n\n When we started, we first brainstormed all the different solutions we could use to collect data from incidents issues to be automatically published to the Status Page:\n\n**Option 1: (GitLab) Webhooks: User sets up the endpoint to which GitLab will post incident updates**\n![Webhook](https://about.gitlab.com/images/blogimages/status-page/webhook.png){: .center}\n\n**Option 2: Alerts coming directly from Prometheus Alertmanager**\n![ALerts](https://about.gitlab.com/images/blogimages/status-page/alerts.png){: .center}\n\n**Option 3: Status page itself monitoring other services**\n![Monitoring](https://about.gitlab.com/images/blogimages/status-page/monitoring.png){: .center}\n\n**Option 4: Users manually pushing a markdown file to git or calling the API with some utility, e.g., `curl`**\n![Git Commit](https://about.gitlab.com/images/blogimages/status-page/gitcommit.png){: .center}\n\n**Option 5: CI job running manually or scheduled to run during certain intervals**\n![CI Job](https://about.gitlab.com/images/blogimages/status-page/cijob.png){: .center}\n\nThose approaches required either manual user input, additional CI resources, or building a sophisticated piece of software that was unnecessary for this case.\n\nWe didn't implement any of the five flows. But decided that the incident issue will be converted to JSON and published to the Status Page by a background job. This means no over-engineering and instant feedback on the Status Page.\n\n#### Frontend\n\nHere at GitLab we love VueJS so much we contribute to it, so the team has great expertise in VueJS. Consequently, our component library [GitLab UI](https://gitlab-org.gitlab.io/gitlab-ui/) and styling utilities are based on VueJS.\n\nYou could guess that we didn't have to debate which frontend framework to use! 
Besides the UI library as a dependency, GitLab provides `eslint`, `stylelint`, and SVGs as npm packages. It was very convenient to have them handy, as any new project setup always raises lots of questions about best practices and best tools. With all of this, the Status Page was able to be GitLab-branded. Feel free to use GitLab utilities in your own project too.\n\nNotably, the Status Page is a stand-alone application, hosted in a separate GitLab repository that uses JSON files generated by a background job. It is distributed under MIT license and can be used separately from GitLab given that correct data source is provisioned. You'll get the best experience by using our Status Page with GitLab.\n\nFrontend along with generated JSON data sources is published to [cloud storage](https://www.youtube.com/watch?v=27GgP6BXR6A). We currently only support Amazon S3 because we are hosted on Google Cloud and want our Status Page to be available even if Google Cloud (and, by extension, GitLab.com) is down. Credentials are provided by the user when setting up incident tracking project for Status Page.\n\n#### The Status page solution\n\nOnce an incident issue is created/updated in GitLab (manually or via [alert](https://docs.gitlab.com/ee/operations/incident_management/index.html#incident-management)), its description (with all types of data) along with comments that were marked as public will be picked by background job, converted to JSON, and mirrored on the Status Page.\n\n![Status Page flow](https://about.gitlab.com/images/blogimages/status-page/status-page-flow.png){: .center}\n\n### Hat tip to our Monitor:Health team\n\nThere are many more technical details that can be explained and that still to be implemented. It is the collaborative efforts of the [Monitor:Health team](/handbook/engineering/development/ops/monitor/respond/) that help make this possible. 
I'm thankful for all heated discussions, great insights, quick iterations, fast fails – the collaboration from the Monitor: Health team are advantages that have played out in the implementation of the Status Page feature.\n\n### Give the Status Page a try\n\nHere's a great [step by step guide](https://docs.gitlab.com/ee/operations/incident_management/status_page.html) on how to set-up a Status Page for your project with GitLab.\n\nEnjoy and may all your systems be operational!\n\nCover image by [Melina Yakas](https://unsplash.com/@myakas16) on [Unsplash](https://unsplash.com/photos/OBWEXPOurWo)\n{: .note}\n",[9],{"slug":5429,"featured":6,"template":686},"how-we-built-status-page-mvc","content:en-us:blog:how-we-built-status-page-mvc.yml","How We Built Status Page Mvc","en-us/blog/how-we-built-status-page-mvc.yml","en-us/blog/how-we-built-status-page-mvc",{"_path":5435,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5436,"content":5442,"config":5448,"_id":5450,"_type":14,"title":5451,"_source":16,"_file":5452,"_stem":5453,"_extension":19},"/en-us/blog/how-we-diagnosed-and-resolved-redis-latency-spikes",{"title":5437,"description":5438,"ogTitle":5437,"ogDescription":5438,"noIndex":6,"ogImage":5439,"ogUrl":5440,"ogSiteName":670,"ogType":671,"canonicalUrls":5440,"schema":5441},"How we diagnosed and resolved Redis latency spikes with BPF and other tools","How we uncovered a three-phase cycle involving two distinct saturation points and a simple fix to break that cycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667913/Blog/Hero%20Images/clocks.jpg","https://about.gitlab.com/blog/how-we-diagnosed-and-resolved-redis-latency-spikes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we diagnosed and resolved Redis latency spikes with BPF and other tools\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matt Smiley\"}],\n        \"datePublished\": 
\"2022-11-28\"\n      }",{"title":5437,"description":5438,"authors":5443,"heroImage":5439,"date":5445,"body":5446,"category":791,"tags":5447},[5444],"Matt Smiley","2022-11-28","\n\nIf you enjoy performance engineering and peeling back abstraction layers to ask underlying subsystems to explain themselves, this article’s for you. The context is a chronic Redis latency problem, and you are about to tour a practical example of using BPF and profiling tools in concert with standard metrics to reveal unintuitive behaviors of a complex system.\n\nBeyond the tools and techniques, we also use an iterative hypothesis-testing approach to compose a behavior model of the system dynamics. This model tells us what factors influence the problem's severity and triggering conditions.\n\nUltimately, we find the root cause, and its remedy is delightfully boring and effective. We uncover a three-phase cycle involving two distinct saturation points and a simple fix to break that cycle. Along the way, we inspect aspects of the system’s behavior using stack sampling profiles, heat maps and flamegraphs, experimental tuning, source and binary analysis, instruction-level BPF instrumentation, and targeted latency injection under specific entry and exit conditions.\n\nIf you are short on time, the takeaways are summarized at the end. But the journey is the fun part, so let's dig in!\n\n## Introducing the problem: Chronic latency \n\nGitLab makes extensive use of Redis, and, on GitLab.com SaaS, we use [separate Redis clusters](/handbook/engineering/infrastructure/production/architecture/#redis-architecture) for certain functions. 
This tale concerns a Redis instance acting exclusively as a least recently used (LRU) cache.\n\nThis cache had a chronic latency problem that started occurring intermittently over two years ago and in recent months had become significantly worse: Every few minutes, it suffered from bursts of very high latency and corresponding throughput drop, eating into its Service Level Objective (SLO). These latency spikes impacted user-facing response times and [burned error budgets](https://gitlab.com/gitlab-org/gitlab/-/issues/360578#note_966597336) for dependent features, and this is what we aimed to solve.\n\n**Graph:** Spikes in the rate of extremely slow (1 second) Redis requests, each corresponding to an eviction burst\n\n![Graph showing spikes in the slow request rate every few minutes](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/00_redis_slow_request_rate_spikes_during_each_eviction_burst.png)\n\nIn prior work, we had already completed several mitigating optimizations. These sufficed for a while, but organic growth had resurfaced this as an important [long-term scaling problem](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#why-is-it-important-to-get-to-the-root-of-the-latency-spikes). We had also already ruled out externally triggered causes, such as request floods, connection rate spikes, host-level resource contention, etc. These latency spikes were consistently associated with memory usage reaching the eviction threshold (`maxmemory`), not by changes in client traffic patterns or other processes competing with Redis for CPU time, memory bandwidth, or network I/O.\n\nWe [initially thought](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1567) that Redis 6.2’s new [eviction throttling mechanism](https://github.com/redis/redis/pull/7653) might alleviate our eviction burst overhead. It did not. 
That mechanism solves a different problem: It prevents a stall condition where a single call to `performEvictions` could run arbitrarily long. In contrast, during this analysis we [discovered](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_977816216) that our problem (both before and after upgrading Redis) was related to numerous calls collectively reducing Redis throughput, rather than a few extremely slow calls causing a complete stall.\n\nTo discover our bottleneck and its potential solutions, we needed to investigate Redis’s behavior during our workload’s eviction bursts.\n\n## A little background on Redis evictions\n\nAt the time, our cache was oversubscribed, trying to hold more cache keys than the [configured `maxmemory` threshold](https://redis.io/docs/reference/eviction/) could hold, so evictions from the LRU cache were expected. But the dense concentration of that eviction overhead was surprising and troubling.\n\nRedis is essentially single-threaded. With a few exceptions, the “main” thread does almost all tasks serially, including handling client requests and evictions, among other things. Spending more time on X means there is less remaining time to do Y, so think about queuing behavior as the story unfolds.\n\nWhenever Redis reaches its `maxmemory` threshold, it frees memory by evicting some keys, aiming to do just enough evictions to get back under `maxmemory`. However, contrary to expectation, the metrics for memory usage and eviction rate (shown below) indicated that instead of a continuous steady eviction rate, there were abrupt burst events that freed much more memory than expected. 
After each eviction burst, no evictions occurred until memory usage climbed back up to the `maxmemory` threshold again.\n\n**Graph:** Redis memory usage drops by 300-500 MB during each eviction burst:\n\n![Memory usage repeatedly rises gradually to 64 GB and then abruptly drops](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/01_redis_memory_usage_dips_during_eviction_bursts.png)\n\n**Graph:** Key eviction spikes match the timing and size of the memory usage dips shown above\n\n![Eviction counter shows a large spike each time the previous graph showed a large memory usage drop](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/02_redis_eviction_bursts.png)\n\nThis apparent excess of evictions became the central mystery. Initially, we thought answering that question might reveal a way to smooth the eviction rate, spreading out the overhead and avoiding the latency spikes. Instead, we discovered that these bursts are an interaction effect that we need to avoid, but more on that later.\n\n## Eviction bursts cause CPU saturation\n\nAs shown above, we had found that these latency spikes correlated perfectly with large spikes in the cache’s eviction rate, but we did not yet understand why the evictions were concentrated into bursts that last a few seconds and occur every few minutes.\n\nAs a first step, we wanted to verify a causal relationship between eviction bursts and latency spikes.\n\nTo test this, we used [`perf`](https://www.brendangregg.com/perf.html) to run a CPU sampling profile on the Redis main thread. Then we applied a filter to split that profile, isolating the samples where it was calling the [`performEvictions` function](https://github.com/redis/redis/blob/6.2.6/src/evict.c#L512). 
Using [`flamescope`](https://github.com/Netflix/flamescope), we can visualize the profile’s CPU usage as a [subsecond offset heat map](https://www.brendangregg.com/HeatMaps/subsecondoffset.html), where each second on the X axis is folded into a column of 20 msec buckets along the Y axis. This visualization style highlights sub-second activity patterns. Comparing these two heat maps confirmed that during an eviction burst, `performEvictions` is starving all other main thread code paths for CPU time.\n\n**Graph:** Redis main thread CPU time, excluding calls to `performEvictions`\n\n![Heat map shows one large gap and two small gaps in an otherwise uniform pattern of 70 percent to 80 percent CPU usage](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/03_heat_map_of_redis_main_thread_during_eviction_burst__excluding_performEvictions.png)\n\n**Graph:** Remainder of the same profile, showing only the calls to `performEvictions`\n\n![This heat map shows the gaps in the previous heap map were CPU time spent performing evictions](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/04_heat_map_of_redis_main_thread_during_eviction_burst__only_performEvictions.png)\n\nThese results confirm that eviction bursts are causing CPU starvation on the main thread, which acts as a throughput bottleneck and increases Redis’s response time latency.  These CPU utilization bursts typically lasted a few seconds, so they were too short-lived to trigger alerts but were still user impacting.\n\nFor context, the following flamegraph shows where `performEvictions` spends its CPU time. There are a few interesting things here, but the most important takeaways are:\n* It gets called synchronously by `processCommand` (which handles all client requests).\n* It handles many of its own deletes. 
Despite its name, the `dbAsyncDelete` function only delegates deletes to a helper thread under certain conditions which turn out to be rare for this workload.\n\n![Flamegraph of calls to function performEvictions, as described above](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/05_flamegraph_of_redis_main_thread_during_eviction_burst__only_performEvictions.png)\n\nFor more details on this analysis, see the [walkthrough and methodology](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_854745083).\n\n## How fast are individual calls to `performEvictions`?\n\nEach incoming request to Redis is handled by a call to `processCommand`, and it always concludes by calling the `performEvictions` function. That call to `performEvictions` is frequently a no-op, returning immediately after checking that the `maxmemory` threshold has not been breached. But when the threshold is exceeded, it will continue evicting keys until it either reaches its `mem_tofree` goal or exceeds its configured time limit per call.\n\nThe CPU heat maps shown earlier proved that `performEvictions` calls were collectively consuming a large majority of CPU time for up to several seconds.\n\nTo complement that, we also measured the wall clock time of individual calls.\n\nUsing the `funclatency` CLI tool (part of the [BCC suite of BPF tools](https://github.com/iovisor/bcc)), we measured call duration by instrumenting entry and exit from the `performEvictions` function and aggregated those measurements into a [histogram](https://en.wikipedia.org/wiki/Histogram) at 1-second intervals. When no evictions were occurring, the calls were consistently low latency (4-7 usecs/call). This is the no-op case described above (including 2.5 usecs/call of instrumentation overhead). 
But during an eviction burst, the results shift to a bimodal distribution, including a combination of the fast no-op calls along with much slower calls that are actively performing evictions:\n\n```\n$ sudo funclatency-bpfcc --microseconds --timestamp --interval 1 --duration 600 --pid $( pgrep -o redis-server ) '/opt/gitlab/embedded/bin/redis-server:performEvictions'\n...\n23:54:03\n     usecs               : count     distribution\n         0 -> 1          : 0        |                                        |\n         2 -> 3          : 576      |************                            |\n         4 -> 7          : 1896     |****************************************|\n         8 -> 15         : 392      |********                                |\n        16 -> 31         : 84       |*                                       |\n        32 -> 63         : 62       |*                                       |\n        64 -> 127        : 94       |*                                       |\n       128 -> 255        : 182      |***                                     |\n       256 -> 511        : 826      |*****************                       |\n       512 -> 1023       : 750      |***************                         |\n```\n\nThis measurement also directly confirmed and quantified the throughput drop in Redis requests handled per second: The call rate to `performEvictions` (and hence to `processCommand`) dropped to 20% of its norm from before the evictions began, from 25K to 5K calls per second.\n\nThis has a huge impact on clients: New requests are arriving at 5x the rate they are being completed. And crucially, we will see soon that this asymmetry is what drives the eviction burst.\n\nFor more details on this analysis, see the [safety check](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_857869826) for instrumentation overhead and the [results walkthrough](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_857907521). 
And for more general reference, the BPF instrumentation overhead estimate is based on these [benchmark results](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1383).\n\n## Experiment: Can tuning mitigate eviction-driven CPU saturation?\n\nThe analyses so far had shown that evictions were severely starving the Redis main thread for CPU time. There were still important unanswered questions (which we will return to shortly), but this was already enough info to [suggest some experiments](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_859236777) to test potential mitigations:\n* Can we spread out the eviction overhead so it takes longer to reach its goal but consumes a smaller percentage of the main thread’s time?\n* Are evictions freeing more memory than expected due to scheduling a lot of keys to be asynchronously deleted by the [lazyfree mechanism](https://github.com/redis/redis/blob/6.2.6/redis.conf#L1079)? Lazyfree is an optional feature that lets the Redis main thread [delegate to an async helper thread](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_859236777) the expensive task of deleting keys that have more than 64 elements. These async evictions do not count immediately towards the eviction loop’s memory goal, so if many keys qualify for lazyfree, this could potentially drive many extra iterations of the eviction loop.\n\nThe [answers](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/7172#note_971197943) to both turned out to be no:\n* Reducing `maxmemory-eviction-tenacity` to its minimum setting still did not make `performEvictions` cheap enough to avoid accumulating a request backlog. It did increase response rate, but arrival rate still far exceeded it, so this was not an effective mitigation.\n* Disabling `lazyfree-lazy-eviction` did not prevent the eviction burst from dropping memory usage far below `maxmemory`. Those lazyfrees represent a small percentage of reclaimed memory. 
This rules out one of the potential explanations for the mystery of excessive memory being freed.\n\nHaving ruled out two potential mitigations and one candidate hypothesis, at this point we return to the pivotal question: Why are several hundred extra megabytes of memory being freed by the end of each eviction burst?\n\n## Why do evictions occur in bursts and free too much memory?\n\nEach round of eviction aims to free just barely enough memory to get back under the `maxmemory` threshold.\n\nWith a steady rate of demand for new memory allocations, the eviction rate should be similarly steady. The rate of arriving cache writes does appear to be steady. So why are evictions happening in dense bursts, rather than smoothly? And why do they reduce memory usage on a scale of hundreds of megabytes rather than hundreds of bytes?\n\nSome potential explanations to explore:\n* Do evictions only end when a large key gets evicted, spontaneously freeing enough memory to skip evictions for a while? No, the memory usage drop is far bigger than the largest keys in the dataset.\n* Do deferred lazyfree evictions cause the eviction loop to overshoot its goal, freeing more memory than intended? No, the above experiment disproved this hypothesis.\n* Is something causing the eviction loop to sometimes calculate an unexpectedly large value for its `mem_tofree` goal? We explore this next. The answer is no, but checking it led to a new insight.\n* Is a feedback loop causing evictions to become somehow self-amplifying? If so, what conditions lead to entering and leaving this state? 
This turned out to be correct.\n\nThese were all plausible and testable hypotheses, and each would point towards a different solution to the eviction-driven latency problem.\n\nThe first two hypotheses we have already eliminated.\n\nTo test the next two, we built custom BPF instrumentation to peek at the calculation of `mem_tofree` at the start of each call to `performEvictions`.\n\n## Observing the `mem_tofree` calculation with `bpftrace`\n\nThis part of the investigation was a personal favorite and led to a critical realization about the nature of the problem.\n\nAs noted above, our two remaining hypotheses were:\n* an unexpectedly large `mem_tofree` goal\n* a self-amplifying feedback loop\n\nTo differentiate between them, we used [`bpftrace`](https://github.com/iovisor/bpftrace) to instrument the calculation of `mem_tofree`, looking at its input variables and results.\n\nThis set of measurements directly tests the following:\n* Does each call to `performEvictions` aim to free a small amount of memory -- perhaps roughly the size of an average cache entry? If `mem_tofree` ever approaches hundreds of megabytes, that would confirm the first hypothesis and reveal what part of the calculation was causing that large value. Otherwise, it rules out the first hypothesis and makes the feedback loop hypothesis more likely.\n* Does the replication buffer size significantly influence `mem_tofree` as a feedback mechanism? Each eviction adds to this buffer, just like normal writes do. If this buffer grows large (possibly partly due to evictions) and then abruptly shrinks (due to the peer consuming it), that would cause a spontaneous large drop in memory usage, ending evictions and instantly reducing memory usage. 
This is one potential way for evictions to drive a feedback loop.\n\nTo peek at the values of the `mem_tofree` calculation ([script](https://gitlab.com/gitlab-com/gl-infra/scalability/uploads/cab2cd03231f8dd4819f77b44d768cb9/redis_snoop.getMaxmemoryState.sha_25a228b839a93a1395907a03f83e1eee448b0f14.production_thresholds.bt)), we needed to isolate the [correct call from `performEvictions`](https://github.com/redis/redis/blob/6.2.6/src/evict.c#L523) to the [`getMaxmemoryState`](https://github.com/redis/redis/blob/6.2.6/src/evict.c#L374-L407) function and reverse engineer its assembly to find the right instruction and register to instrument for each of the source code level variables that we wanted to capture. From that data we generate histograms for each of the following variables:\n\n```\nmem_reported = zmalloc_used_memory()        // All used memory tracked by jemalloc\noverhead = freeMemoryGetNotCountedMemory()  // Replication output buffers + AOF buffer\nmem_used = mem_reported - overhead          // Non-exempt used memory\nmem_tofree = mem_used - maxmemory           // Eviction goal\n```\n\n_Caveat:_ Our [custom BPF instrumentation](https://gitlab.com/gitlab-com/gl-infra/scalability/uploads/cab2cd03231f8dd4819f77b44d768cb9/redis_snoop.getMaxmemoryState.sha_25a228b839a93a1395907a03f83e1eee448b0f14.production_thresholds.bt) is specific to this particular build of the `redis-server` binary, since it attaches to virtual addresses that are likely to change the next time Redis is compiled. But the approach is able to be generalized. Treat this as a concrete example of using BPF to inspect source code variables in the middle of a function call without having to rebuild the binary. Because we are peeking at the function’s intermediate state and because the compiler inlined this function call, we needed to do binary analysis to find the correct instrumentation points. 
In general, peeking at a function’s arguments or return value is easier and more portable, but in this case it would not suffice.\n\nThe results:\n* Ruled out the first hypothesis: Each call to `performEvictions` had a small target value (`mem_tofree` \u003C 2 MB). This means each call to `performEvictions` did a small amount of work. Redis’s mysterious rapid drop in memory usage cannot have been caused by an abnormally large `mem_tofree` target evicting a big batch of keys all at once. Instead, there must be many calls collectively driving down memory usage.\n* The replication output buffers remained consistently small, ruling out one of the potential feedback loop mechanisms.\n* Surprisingly, `mem_tofree` was usually 16 KB to 64 KB, which is larger than a typical cache entry. This size discrepancy hints that cache keys may not be the main source of the memory pressure perpetuating the eviction burst once it begins.\n\nAll of the above results were consistent with the feedback loop hypothesis.\n\nIn addition to answering the initial questions, we got a bonus outcome: Concurrently measuring both `mem_tofree` and `mem_used` revealed a crucial new fact – _the memory reclaim is a completely distinct phase from the eviction burst_.\n\nReframing the pathology as exhibiting separate phases for evictions versus memory reclaim led to a series of realizations, described in the next section. 
From that emerged a coherent hypothesis explaining all the observed properties of the pathology.\n\nFor more details on this analysis, see [methodology notes](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982498636), [build notes](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982499538) supporting the disassembly of the Redis binary, and [initial interpretations](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_977994182).\n\n## Three-phase cycle\n\nWith the above results indicating a distinct separation between the evictions and the memory reclaim, we can now concisely characterize [three phases](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982623949) in the cycle of eviction-driven latency spikes.\n\n**Graph:** Diagram (not to scale) comparing memory and CPU usage to request and response rates during each of the three phases\n\n![Diagram summarizes the text that follows, showing CPU and memory saturate in Phase 2 until request rate drops to match response rate, after which they recover](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/06_3_phase_cycle_of_eviction_bursts.png)\n\nPhase 1: Not saturated (7-15 minutes)\n* Memory usage is below `maxmemory`. No evictions occur during this phase.\n* Memory usage grows organically until reaching `maxmemory`, which starts the next phase.\n\nPhase 2: Saturated memory and CPU (6-8 seconds)\n* When memory usage reaches `maxmemory`, evictions begin.\n* Evictions occur only during this phase, and they occur intermittently and frequently.\n* Demand for memory frequently exceeds free capacity, repeatedly pushing memory usage above `maxmemory`. 
Throughout this phase, memory usage oscillates close to the `maxmemory` threshold, evicting a small amount of memory at a time, just enough to get back under `maxmemory`.\n\nPhase 3: Rapid memory reclaim (30-60 seconds)\n* No evictions occur during this phase.\n* During this phase, something that had been holding a lot of memory starts quickly and steadily releasing it.\n* Without the overhead of running evictions, CPU time is again spent mostly on handling requests (starting with the backlog that accumulated during Phase 2).\n* Memory usage drops rapidly and steadily. By the time this phase ends, hundreds of megabytes have been freed. Afterwards, the cycle restarts with Phase 1.\n\nAt the transition between Phase 2 and Phase 3, evictions abruptly end because memory usage stays below the `maxmemory` threshold.\n\nReaching that transition point where memory pressure becomes negative signals that whatever was driving the memory demand in Phase 2 has started releasing memory faster than it is consuming it, shrinking the footprint it had accumulated during the previous phase.\n\nWhat is this **mystery memory consumer** that bloats its demand during Phase 2 and frees it during Phase 3?\n\n## The mystery revealed\n\n[Modeling the phase transitions](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982651298) gave us some useful constraints that a viable hypothesis must satisfy. 
The mystery memory consumer must:\n* quickly bloat its footprint to hundreds of megabytes on a timescale of less than 10 seconds (the duration of Phase 2), under conditions triggered by the start of an eviction burst\n* quickly release its accumulated excess on a timescale of just tens of seconds (the duration of Phase 3), under the conditions immediately following an eviction burst\n\n**The answer:** The client input/output buffers meet those constraints to be the mystery memory consumer.\n\nHere is how that hypothesis plays out:\n* During Phase 1 (healthy state), the Redis main thread’s CPU usage is already fairly high. At the start of Phase 2, when evictions begin, the eviction overhead saturates the main thread’s CPU capacity, quickly dropping response rate below the incoming request rate.\n* This throughput mismatch between arrivals versus responses **is itself the amplifier** that takes over driving the eviction burst. As the size of that rate gap increases, the proportion of time spent doing evictions also increases.\n* Accumulating a backlog of requests requires memory, and that backlog continues to grow until enough clients are stalled that the arrival rate drops to match the response rate. As clients stall, the arrival rate falls, and with it the memory pressure, eviction rate, and CPU overhead begin to reduce.\n* At the equilibrium point when arrival rate falls to match response rate, memory demand is satisfied and evictions stop (ending Phase 2). Without the eviction overhead, more CPU time is available to process the backlog, so response rate increases above request arrival rate. This recovery phase steadily consumes the request backlog, incrementally freeing memory as it goes (Phase 3).\n* Once the backlog is resolved, the arrival and response rates match again. 
CPU usage is back to its Phase 1 norm, and memory usage has temporarily dropped in proportion to the max size of Phase 2’s request backlog.\n\nWe confirmed this hypothesis via a [latency injection experiment](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_987049036) showing that queuing alone explains the pathology. This outcome supports the conclusion that the extra memory demand originates from response rate falling below request arrival rate.\n\n## Remedies: How to avoid entering the eviction burst cycle\n\nNow that we understand the dynamics of the pathology, we can draw confident conclusions about viable solutions.\n\nRedis evictions are only self-amplifying when all of the following conditions are present:\n* **Memory saturation:** Memory usage reaches the `maxmemory` limit, causing evictions to start.\n* **CPU saturation:** The baseline CPU usage by the Redis main thread’s normal workload is close enough to a whole core that the eviction overhead pushes it to saturation. This reduces the response rate below request arrival rate, inducing self-amplification via increased memory demand for request buffering.\n* **Many active clients:** The saturation only lasts as long as request arrival rate exceeds response rate. 
Stalled clients no longer contribute to that arrival rate, so the saturation lasts longer and has a greater impact if Redis has many active clients still sending requests.\n\nViable remedies include:\n* Avoid memory saturation by any combination of the following to make peak memory usage less than the `maxmemory` limit:\n  * Reduce cache time to live (TTL)\n  * Increase `maxmemory` (and host memory if needed, but watch out for [`numa_balancing` CPU overhead](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1889) on hosts with multiple NUMA nodes)\n  * Adjust client behavior to avoid writing unnecessary cache entries\n  * Split the cache among multiple instances (sharding or functional partitioning, helps avoid both memory and CPU saturation)\n* Avoid CPU saturation by any combination of the following to make peak CPU usage for the workload plus eviction overhead be less than 1 CPU core:\n  * Use the fastest processor available for single-threaded instructions per second\n  * Isolate the redis-server process (particularly its main thread) from any other competing CPU-intensive processes (dedicated host, taskset, cpuset)\n  * Adjust client behavior to avoid unnecessary cache lookups or writes\n  * Split the cache among multiple instances (sharding or functional partitioning, helps avoid both memory and CPU saturation)\n  * Offload work from the Redis main thread (io-threads, lazyfree)\n  * Reduce eviction tenacity (only gives a minor benefit in our experiments)\n\nMore exotic potential remedies could include a new Redis feature. One idea is to exempt ephemeral allocations like client buffers from counting towards the `maxmemory` limit, instead applying that limit only to key storage. 
Alternatively, we could limit evictions to only consume at most a configurable percentage of the main thread’s time, so that most of its time is still spent on request throughput rather than eviction overhead.\n\nUnfortunately, either of those features would trade one failure mode for another, reducing the risk of eviction-driven CPU saturation while increasing the risk of unbounded memory growth at the process level, which could potentially saturate the host or cgroup and lead to an OOM, or out of memory, kill. That trade-off may not be worthwhile, and in any case it is not currently an option.\n\n## Our solution\n\nWe had already exhausted the low-hanging fruit for CPU efficiency, so we focused our attention on avoiding memory saturation.\n\nTo improve the cache’s memory efficiency, we [evaluated](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_990891708) which types of cache keys were using the most space and how much [`IDLETIME`](https://redis.io/commands/object-idletime/) they had accrued since last access. This memory usage profile identified some rarely used cache entries (which waste space), helped inform the TTL, or time to live, tuning by first focusing on keys with a high idle time, and highlighted some useful potential cutpoints for functionally partitioning the cache.\n\nWe [decided](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_1014582669) to concurrently pursue several cache efficiency improvements and opened an [epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/764) for it. 
The goal was to avoid chronic memory saturation, and the main action items were:\n* Iteratively reduce the cache’s [default TTL](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1854) from 2 weeks to 8 hours (helped a lot!)\n* Switch to [client-side caching](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_1026821730) for certain cache keys (efficiently avoids spending shared cache space on non-shared cache entries)\n* [Partition](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/762) a set of cache keys to a separate Redis instance\n\nThe TTL reduction was the simplest solution and turned out to be a big win. One of our main concerns with TTL reduction was that the additional cache misses could potentially increase workload on other parts of the infrastructure. Some cache misses are more expensive than others, and our metrics are not granular enough to quantify the cost of cache misses per type of cache entry. This concern is why we applied the TTL adjustment incrementally and monitored for SLO violations. Fortunately, our inference was correct: Reducing TTL did not significantly reduce the cache hit rate, and the additional cache misses did not cause noticeable impact to downstream subsystems.\n\nThe TTL reduction turned out to be sufficient to drop memory usage consistently a little below its saturation point.\n\nIncreasing `maxmemory` had initially not been feasible because the original peak memory demand (prior to the efficiency improvements) was expected to be larger than the max size of the VMs we use for Redis. 
However, once we dropped memory demand below saturation, then we could confidently [provision headroom](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1868) for future growth and re-enable [saturation alerting](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1883).\n\n## Results\n\nThe following graph shows Redis memory usage transitioning out of its chronically saturated state, with annotations describing the milestones when latency spikes ended and when saturation margin became wide enough to be considered safe:\n\n![Redis memory usage stops showing a flat top saturation](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/07_epic_results__memory_saturation_avoided_by_TTL_reductions.png)\n\nZooming into the days when we rolled out the TTL adjustments, we can see the harmful eviction-driven latency spikes vanish as we drop memory usage below its saturation point, exactly as predicted:\n\n![Redis memory usage starts as a flat line and then falls below that saturation line](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/08_results__redis_memory_usage_stops_saturating.png)\n\n![Redis response time spikes stop occurring at the exact point when memory stops being saturated](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/09_results__redis_latency_spikes_end.png)\n\nThese eviction-driven latency spikes had been the biggest cause of slowness in Redis cache.\n\nSolving this source of slowness significantly improved the user experience. This 1-year lookback shows only the long-tail portion of the improvement, not even the full benefit.  
Each weekday had roughly 2 million Redis requests slower than 1 second, until our fix in mid-August:\n\n![Graph of the daily count of Redis cache requests slower than 1 second, showing roughly 2 million slow requests per day on weekdays until mid-August, when the TTL adjustments were applied](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/10_results__1_year_retrospective_of_slow_redis_requests_per_day.png)\n\n## Conclusions\n\nWe solved a long-standing latency problem that had been worsening as the workload grew, and we learned a lot along the way. This article focuses mostly on the Redis discoveries, since those are general behaviors that some of you may encounter in your travels. We also developed some novel tools and analytical methods and uncovered several useful environment-specific facts about our workload, infrastructure, and observability, leading to several additional improvements and proposals not mentioned above.\n\nOverall, we made several efficiency improvements and broke the cycle that was driving the pathology. Memory demand now stays well below the saturation point, eliminating the latency spikes that were burning error budgets for the development teams and causing intermittent slowness for users. All stakeholders are happy, and we came away with deeper domain knowledge and sharper skills!\n\n## Key insights summary\n\nThe following notes summarize what we learned about Redis eviction behavior (current as of version 6.2):\n* The same memory budget (`maxmemory`) is shared by key storage and client connection buffers. A spike in demand for client connection buffers counts towards the `maxmemory` limit, in the same way that a spike in key inserts or key size would.\n* Redis performs evictions in the foreground on its main thread. All time spent in `performEvictions` is time not spent handling client requests. 
Consequently, during an eviction burst, Redis has a lower throughput ceiling.\n* If eviction overhead saturates the main thread’s CPU, then response rate falls below request arrival rate. Redis accumulates a request backlog (which consumes memory), and clients experience this as slowness.\n* The memory used for pending requests requires more evictions, driving the eviction burst until enough clients are stalled that arrival rate falls back below response rate. At that equilibrium point, evictions stop, eviction overhead vanishes, Redis rapidly handles its request backlog, and that backlog’s memory gets freed.\n* Triggering this cycle requires all of the following:\n  * Redis is configured with a `maxmemory` limit, and its memory demand exceeds that size. This memory saturation causes evictions to begin.\n  * Redis main thread’s CPU utilization is high enough under its normal workload that having to also perform evictions drives it to CPU saturation. This reduces response rate below request rate, causing a growing request backlog and high latency.\n  * Many active clients are connected. The duration of the eviction burst and the size of memory spent on client connection buffers increases proportionally to the number of active clients.\n* Prevent this cycle by avoiding either memory or CPU saturation. In our case, avoiding memory saturation was easier (mainly by reducing cache TTL).\n\n## Further reading\n\nThe following lists summarize the analytical tools and methods cited in this article. These tools are all highly versatile and any of them can provide a massive level-up when working on performance engineering problems.\n\nTools:\n* [perf](https://www.brendangregg.com/perf.html) - A Linux performance analysis multitool. 
In this article, we used `perf` as a sampling profiler, capturing periodic stack traces of the `redis-server` process's main thread when it is actively running on a CPU.\n* [Flamescope](https://github.com/Netflix/flamescope) - A visualization tool for rendering a `perf` profile (and other formats) into an interactive subsecond heat map. This tool invites the user to explore the timeline for microbursts of activity or inactivity and render flamegraphs of those interesting timespans to explore what code paths were active.\n* [BCC](https://github.com/iovisor/bcc) - BCC is a framework for building BPF tools, and it ships with many useful tools out of the box. In this article, we used `funclatency` to measure the call durations of a specific Redis function and render the results as a histogram.\n* [bpftrace](https://github.com/iovisor/bpftrace) - Another BPF framework, ideal for answering ad-hoc questions about your system's behavior. It uses an `awk`-like syntax and is [quick to learn](https://github.com/iovisor/bpftrace#readme). In this article, we wrote a [custom `bpftrace` script](https://gitlab.com/gitlab-com/gl-infra/scalability/uploads/cab2cd03231f8dd4819f77b44d768cb9/redis_snoop.getMaxmemoryState.sha_25a228b839a93a1395907a03f83e1eee448b0f14.production_thresholds.bt) for observing the variables used in computing how much memory to free during each round of evictions. 
This script's instrumentation points are specific to our particular build of `redis-server`, but the [approach is able to be generalized](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982498636) and illustrates how versatile this tool can be.\n\nUsage examples:\n* [Example](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_854745083) - Walkthrough of using `perf` and `flamescope` to capture, filter, and visualize the stack sampling CPU profiles of the Redis main thread.\n* [Example](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_857869826) - Walkthrough (including safety check) of using `funclatency` to measure the durations of the frequent calls to function `performEvictions`.\n* [Example](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/7172#note_971197943) - Experiment for adjusting Redis settings `lazyfree-lazy-eviction` and `maxmemory-eviction-tenacity` and observing the results using `perf`, `funclatency`, `funcslower`, and the Redis metrics for eviction count and memory usage.\n* [Example](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982498636) - This is a working example (script included) of using `bpftrace` to observe the values of a function's variables. In this case we inspected the `mem_tofree` calculation at the start of `performEvictions`. Also, these [companion notes](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982499538) discuss some build-specific considerations.\n* [Example](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_987049036) - Describes the latency injection experiment (the first of the three ideas). This experiment confirmed that memory demand increases at the predicted rate when we slow response rate to below request arrival rate, in the same way evictions do. 
This result confirmed the request queuing itself is the source of the memory pressure that amplifies the eviction burst once it begins.\n",[1040,978,9],{"slug":5449,"featured":6,"template":686},"how-we-diagnosed-and-resolved-redis-latency-spikes","content:en-us:blog:how-we-diagnosed-and-resolved-redis-latency-spikes.yml","How We Diagnosed And Resolved Redis Latency Spikes","en-us/blog/how-we-diagnosed-and-resolved-redis-latency-spikes.yml","en-us/blog/how-we-diagnosed-and-resolved-redis-latency-spikes",{"_path":5455,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5456,"content":5462,"config":5467,"_id":5469,"_type":14,"title":5470,"_source":16,"_file":5471,"_stem":5472,"_extension":19},"/en-us/blog/how-we-use-gitlab-at-the-province-of-nova-scotia",{"title":5457,"description":5458,"ogTitle":5457,"ogDescription":5458,"noIndex":6,"ogImage":5459,"ogUrl":5460,"ogSiteName":670,"ogType":671,"canonicalUrls":5460,"schema":5461},"How we use GitLab at the Province of Nova Scotia","The Unix operations team at the Province of Nova Scotia decided to implement GitLab for source control and CI/CD. 
Here's how we started exploring DevOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670226/Blog/Hero%20Images/how-we-use-gitlab-at-nova-scotia.jpg","https://about.gitlab.com/blog/how-we-use-gitlab-at-the-province-of-nova-scotia","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use GitLab at the Province of Nova Scotia\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steven Zinck\"},{\"@type\":\"Person\",\"name\":\"Paul Badcock\"}],\n        \"datePublished\": \"2017-07-18\",\n      }",{"title":5457,"description":5458,"authors":5463,"heroImage":5459,"date":5464,"body":5465,"category":791,"tags":5466},[2335,2336],"2017-07-18","\n\nIn 2015 the Unix operations team at the Province of Nova Scotia decided to implement GitLab for source control and [Continuous Integration and Continuous Deployment](/solutions/continuous-integration/). This was the beginning of our foray into DevOps practices. This article describes our automated testing, integration and release of Puppet code.\n\n\u003C!-- more -->\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/devops-infinity-graphic.png){: .shadow}\u003Cbr>\n\nYou can also learn more about our DevOps transformation by watching our recent interview:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/SHdeqznJXbc\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\n### Source control\n\nA source control management (SCM) system allows the user to “commit” code, documentation and other system artifacts such as configuration files to a central location. Each change results in a new version of the file, and previous versions of the file remain available on the SCM. 
Restoring a previous version is quick and easy.\n\nWe needed a way for multiple sysadmins to be able to work on code without colliding with one another. We also needed a way to vet changes through a peer review process. GitLab makes this easy thanks to its support of branching and merge requests. Branching allows a sysadmin to create an individual copy of the production code (“master”) and work with it in isolation — this allows multiple team members to be working on the same production code base without being concerned about conflicts between their work.\n\n### Continuous integration\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/ci-cd-workflow.png){: .shadow}\u003Cbr>\n\nAs we built out more of our infrastructure with Puppet, we needed an automated way of testing our code. Over time, our test strategy has evolved to include automated [syntax checking](https://puppet.com/blog/verifying-puppet-checking-syntax-and-writing-automated-tests), [linting](http://puppet-lint.com/), [unit](https://puppet.com/blog/unit-testing-rspec-puppet-for-beginners) and [integration](http://serverspec.org/) tests. Manual testing was not sufficient, as it was often forgotten about and was very time consuming. Automated testing solved that — for every code commit, the test pipeline is executed. A complete test cycle currently takes under five minutes.\n\nOn each code commit to a branch other than master, the following test pipeline is kicked off by GitLab CI:\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/ci-screenshot.png){: .shadow}\u003Cbr>\n\nIf at any point a job fails, the pipeline stops and the sysadmin is notified. One of the great features of GitLab CI is its tight integration with Docker — each of the jobs above is run inside its own isolated container. 
The syntax-lint-spec job verifies that the Puppet syntax is good; linting confirms the code conforms to best practices; and spec confirms that logically the code functions as designed.\n\nThe test-kitchen jobs are a full suite of [ServerSpec](http://serverspec.org/) tests. We automatically provision four containers that represent our four most common configurations. Our Puppet code is applied to each container to verify that it will work in our production environment. This acts as a full regression test each time a code commit is made, and ensures that there were no unintended problems introduced. It gives us confidence that the code is actually doing what it’s intended to do.\n\n### Continuous deployment\n\nOnce all of the tests pass, the sysadmin can submit a merge request for their branch, and it will be reviewed by a senior staff member before reaching production. This is an important part of our workflow, because it gives junior staff the confidence that a more senior member of the team will review and approve a change before it reaches any of our servers. If the merge request is accepted, the branch will be merged into master and at that point GitLab CI will push the code to our Red Hat Satellite and Puppet Enterprise servers where it will be deployed to our environment.\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/cd-screenshot.jpeg){: .shadow}\u003Cbr>\n\nYou can find the configuration files (Dockerfiles, .kitchen.yml, .gitlab-ci.yml and Satellite push script) at our [GitHub](https://github.com/nsgov).\n\nThe implementation of our system automation strategy and the toolset we selected has proven itself many times. 
We are spending less time fighting fires due to the streamlined and tested nature of our deployments and have earned the confidence of our clients.\n\n### The road ahead\n\nIn upcoming articles, we’ll write about the CI/CD process we built with [Communications Nova Scotia](https://novascotia.ca/cns/) that allows their development team to deploy and roll back their Dockerized application environment on demand. We also plan to write about our automated test strategy for Red Hat Ansible.\n\nThis post originally appeared on [*Medium*](https://medium.com/@szinck/how-we-use-gitlab-at-the-province-of-nova-scotia-708b514cc47f).\n\n## About the Guest Authors\n\n[Steve Zinck](https://www.linkedin.com/in/stevezinck/) spent most of his career working in the Public Service as a Unix and Infrastructure administrator. Over the past few years, he's started to transition away from traditional systems administration and begun to focus on software delivery and automation. As part of that transition, his team has implemented GitLab at the core of our automation and software delivery stack. His current focus is working with software and application teams to assist in streamlining their deployment and delivery process.\n\n[Paul Badcock](https://www.linkedin.com/in/pbadcock/?ppe=1) started working in the IT sector in 1998 with positions in small startups, to large fortune 500 companies, to currently on a public-sector team. His career was focused as a traditional IT Linux administrator until in the mid-2000s he started focusing on adopting development tooling, practices and methodologies for operational teams. This work culminated in implementing an early 2010s DevOps workplace framework with the help of @stewbawka and subsequently working with like-minded teams since. 
As a part of adopting developer tools he has previously worked with and managed CVS, SVN installations and various vendor products before reading a “Show HN” posting on Hacker News about GitLab.\n",[9,109,683],{"slug":5468,"featured":6,"template":686},"how-we-use-gitlab-at-the-province-of-nova-scotia","content:en-us:blog:how-we-use-gitlab-at-the-province-of-nova-scotia.yml","How We Use Gitlab At The Province Of Nova Scotia","en-us/blog/how-we-use-gitlab-at-the-province-of-nova-scotia.yml","en-us/blog/how-we-use-gitlab-at-the-province-of-nova-scotia",{"_path":5474,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5475,"content":5480,"config":5486,"_id":5488,"_type":14,"title":5489,"_source":16,"_file":5490,"_stem":5491,"_extension":19},"/en-us/blog/how-zoopla-uses-dora-metrics-and-your-team-can-too",{"title":5476,"description":5477,"ogTitle":5476,"ogDescription":5477,"noIndex":6,"ogImage":4082,"ogUrl":5478,"ogSiteName":670,"ogType":671,"canonicalUrls":5478,"schema":5479},"Zoopla Boosts Deployments & Automation with DORA Metrics","GitLab customer Zoopla used the DORA metrics to boost production deployments from once a week to roughly 40 times a day. 
And that was only one of the performance improvements...","https://about.gitlab.com/blog/how-zoopla-uses-dora-metrics-and-your-team-can-too","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Zoopla used DORA metrics to boost deployments, increase automation and more\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gustaw Fit of Zoopla\"}],\n        \"datePublished\": \"2022-01-24\",\n      }",{"title":5481,"description":5477,"authors":5482,"heroImage":4082,"date":4127,"body":5484,"category":679,"tags":5485},"How Zoopla used DORA metrics to boost deployments, increase automation and more",[5483],"Gustaw Fit of Zoopla","\n\nAbout two years ago, Zoopla started wondering how we could measure the overall improvements in performance in the engineering department. We were in the early stages of a program of work called [Bedrock](https://zoopla.blog/posts/2021/project-bedrock-replatforming/). Bedrock was all about making engineering capability more efficient and flexible in responding to the business needs.\n\nAfter researching various options, we decided on the [DORA metrics](https://cloud.google.com/blog/products/devops-sre/using-the-four-keys-to-measure-your-devops-performance). 
They provided us with all the necessary insights to track our success, and benchmark ourselves against a definition of good.\n\n## What is DORA?\n\nDORA is the acronym for the DevOps Research and Assessment group: they’ve surveyed more than 50,000 technical professionals worldwide to better understand how the technical practices, cultural norms, and management approach affect organisational performance.\n\n(Take a dive into the [latest DORA Report](https://www.ciosummits.com/Online_Assets_Puppet_2016_State_of_DevOps_Report.pdf) and in the book that summarizes the findings:  [Accelerate](https://www.amazon.com/Accelerate-Building-Performing-Technology-Organizations/dp/B07BMBYHXL/ref=sr_1_2?crid=R1O9AH85U6PR&keywords=accelerate+book&qid=1643046474&sprefix=accelerate+book%2Caps%2C70&sr=8-2)).\n\n## What are the metrics Zoopla is using?\n\n- Production deploy frequency - Time between the first commit on a merge request to master and production deployment\n- Lead time - Number of successful production deployments / day\n- Mean Time To Recover - Time required from customer impact first started to removal of the customer impact\n- Change fail rate - For the primary application or service you work on, what percentage of changes to production or released to users result in degraded service (e.g., lead to service impairment or service outage) and subsequently require remediation (e.g., require a hotfix, rollback, fix forward, patch)\n- Time to onboard - Time required from the engineer who had joined the company, until their first commit is merged to master on a non-personal repository.\n\n\n## How do we understand the metrics?\n\n- Production deploy frequency - limiting amount of code going to production at once (limited batch size)\n- Lead time - reducing amount of blockers for developers\n- MTTR - improving speed of incident recovery\n- Change fail rate - improving quality focus\n- Time to onboard - how efficient is our onboarding process\n\nFollowing the rules of 
lean:\n\n- Value is only released to production, once it leaves the factory floor (production deploy frequency)\n- Optimize Work In Progress (lead time)\n- Invest in SRE/automation (mean time to recover)\n- Practice kaizen (change fail rate)\n- Have efficient knowledge sharing and work allocation processes (time to onboard)\n\n## How are we collecting the metrics?\n\nWe are using the following data sources:\n\n[GitLab](https://about.gitlab.com) for deploy frequency, lead time, change fail rate and time to onboard\n\n[Blameless](https://www.blameless.com) for mean time to recover (as recorded in incidents)\n\n[Jenkins](https://www.jenkins.io) for deploy frequency and change fail rate\n\nThe process is using APIs extensively. We also needed to come up with a standardised data schema to be able to meaningfully use the metrics. The raw data stored in the s3 bucket can be used in any visualization tool. For our own purposes we have decided to display them in a google spreadsheet. All of these required an extensive implementation effort. The whole flow is powered by modern Python.\n\nSome parts of our process are still not perfect. We are actively working to simplify the flows and standardize data sets.\n\n## How are the metrics used at Zoopla?\n\nThe dashboard is regularly reviewed by the senior engineering management. The metrics are on public display, and are discussed and reviewed in our monthly town hall meeting, and our fortnightly Ops Review. Each team is encouraged to reflect on the metrics as they plan their work, and consider improvements they could introduce.\n\nThe metrics also influence the decisions and prioritization. 
Just as importantly, they help us to transform our company culture.\n\nIn terms of improvements measured:\n\n- Production deployment frequency was improved from once in a week to multiple times per day (~40 deployments per day).\n- Lead time was improved from an average of 10 days to less than two days (with many projects being close to 2-4h on average).\n- Mean time to recover: we have not measured it before, the main benefit for us is understanding what we need to improve. We are currently in the area of 1-3h on average for sev-0 or sev-1 issues and 24h on average, when we include sev-2 issues.\n- Change failure rate was about 60% before we started, it is now oscillating between ~1-5%.\n- Time to onboard was over 20 days, and we have brought this down to around five days.\n\nThe main cultural changes were:\n\n- We have automated the majority of our deployment pipelines.\n- We have added a lot of automation to incident resolution, primarily by adding auto-scaling.\n- We have trained our teams in incident response, and introduced an on-call rota.\n- We have moved the bulk of our infrastructure management to a standardised Infrastructure as Code (mainly Terraform).\n- We have improved our onboarding process.\n- We have improved our alerting, and partnered with New Relic to reduce investigation effort.\n\nWe hold the ambition to join the elite performing group of organisations as defined by the State of DevOps report. Each day brings us closer to that goal.\n\n## What are our future plans?\n\nOn the technical side, we are working to improve automation of the metrics, to go away from our internal and bespoke metric collection model. We hope our partnership with New Relic will soon enable a much better solution.\n\nOn the DevOps/DORA culture side, we are providing regular talks and training to wider audiences (not only engineering), to establish DORA as a reference point in future product development. 
We are also making it a key point of our new consolidated engineering strategy.\n\nWe’ve found the DORA metrics helped us improve our software development and delivery processes. With these findings, organizations can make informed adjustments in their process workflows, automation, team composition, tools, and more. We recommend you try this in your organisation too.\n\nFurther reading:\n\n- [The Phonenix Project](https://www.amazon.com/The-Phoenix-Project-audiobook/dp/B00VATFAMI/ref=sr_1_1?crid=3U43AWAK4L6YI&keywords=The+Phoenix+project&qid=1643046949&sprefix=the+phoenix+project%2Caps%2C70&sr=8-1) by Gene Kim, Kevin Behr and George Spafford\n- [The Goal: A Process of Ongoing Improvement](https://www.amazon.com/The-Goal-audiobook/dp/B00IFGGDA2/ref=sr_1_1?crid=2EAKYMNBHT0B5&keywords=the+goal+by+eliyahu+goldratt&qid=1643047036&s=audible&sprefix=The+goal%2Caudible%2C125&sr=1-1) by Eliyahu Goldratt and Jeff Cox\n- [The Unicorn Project](https://www.amazon.com/The-Unicorn-Project-Gene-Kim-audiobook/dp/B0812C82T9/ref=sr_1_1?crid=2B0ENCYRNG2BO&keywords=the+unicorn+project&qid=1643047132&s=audible&sprefix=the+unicorn%2Caudible%2C76&sr=1-1) by Gene Kim et al\n",[9,793,2981],{"slug":5487,"featured":6,"template":686},"how-zoopla-uses-dora-metrics-and-your-team-can-too","content:en-us:blog:how-zoopla-uses-dora-metrics-and-your-team-can-too.yml","How Zoopla Uses Dora Metrics And Your Team Can Too","en-us/blog/how-zoopla-uses-dora-metrics-and-your-team-can-too.yml","en-us/blog/how-zoopla-uses-dora-metrics-and-your-team-can-too",{"_path":5493,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5494,"content":5499,"config":5504,"_id":5506,"_type":14,"title":5507,"_source":16,"_file":5508,"_stem":5509,"_extension":19},"/en-us/blog/if-its-time-to-learn-devops-heres-where-to-begin",{"title":5495,"description":5496,"ogTitle":5495,"ogDescription":5496,"noIndex":6,"ogImage":4540,"ogUrl":5497,"ogSiteName":670,"ogType":671,"canonicalUrls":5497,"schema":5498},"It's time to learn DevOps 
and here's where to begin","DevOps is a unique blend of tech, tools and culture. Take it step-by-step and it's easy to learn. This simple guide shows you how to get started. Learn more here!","https://about.gitlab.com/blog/if-its-time-to-learn-devops-heres-where-to-begin","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It's time to learn DevOps and here's where to begin\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-03-10\",\n      }",{"title":5495,"description":5496,"authors":5500,"heroImage":4540,"date":5501,"body":5502,"category":769,"tags":5503},[810],"2022-03-10","\n\nIf you’re fairly new – or really new – to a DevOps team, you’ve made a great career move, but you probably [have a lot to learn](/topics/devops/devops-beginner-resources/). To truly learn DevOps, there are technologies and processes to figure out, phases to understand, and a [whole new mindset to adopt](/blog/soft-skills-are-the-key-to-your-devops-career-advancement/). \n\n## Learn DevOps, where to start?\n\nLearn DevOps? Why? Where?... Since the demand for DevOps professionals is hot and salaries for this [dynamic job sector](/blog/four-tips-to-increase-your-devops-salary/) are on the rise, there are a lot of DevOps beginners trying to figure out what to learn first. But don’t worry: We can help. \n\nWith a lot on [your learn DevOps to-do list](https://learn.gitlab.com/beginners-guide-devops/guide-to-devops), we’ll walk you through where you should start, including figuring out what DevOps is all about, the stages of the DevOps lifecycle, and the uniquely [collaborative culture](/blog/engineering-teams-collaborating-remotely/). \n\n## What DevOps is really all about\n\nIn the past, software development was done using a complicated and confusing jumble of tools and workflows. 
Both projects and teams often were siloed, which meant they weren’t coordinating efforts or sharing best practices. It was a frustrating and inefficient process that led to deployment traffic jams, costing teams time and money. There were a lot of headaches.\n\nThink of DevOps as a way to simplify development and deployment, while making the entire process more efficient. With DevOps, once-siloed teams, tools, and workflows are combined in a software development ecosystem. That ecosystem enables teams to plan, create and deliver more efficiently, securely, and collaboratively. \n\n## What to learn for DevOps\n\nDevOps also puts a focus on automation, shifting security left, and making practices not only repeatable but measurable. That speeds development cycles and slashes the time between designing new features and rolling them out into production.\n\nBecause of this efficiency and the enablement of teamwork, DevOps makes not only your software delivery more agile, it makes your entire company more agile. DevOps enables the business to pivot quickly, answering new and critical customer needs, responding to changes in the market and adjusting to stay ahead of the competition. \n\n## To learn DevOps, collaborate\n\nDevOps is built around a culture of collaboration that encourages teammates to share ideas and help each other. It’s not simply something that’s suggested and it’s not something that’s done in a meeting or two. Collaboration is a [core principle](/blog/4-must-know-devops-principles/) of DevOps. \n\nIt's easy to think that to learn DevOps means focusing on programming languages, security, and CI/CD. Those skills and technologies are critical but don’t dismiss the idea of collaboration. It’s about communication, and working together to create something new and to fix problems. However, DevOps professionals also collaborate with other departments, like security, marketing, and the C-suite. 
You’re all pulling in the same direction.\n\nIn the [2021 Global DevSecOps Survey](/developer-survey/), survey respondents consistently said communication and collaboration skills were key to their future careers. \n\n## The key stages of the DevOps lifecycle\n\nThere’s a definite flow to DevOps, with the process moving from planning and developing all the way through to deployment, monitoring, and feedback. There are three basic stages, or phases – build, test, and deploy. Within these are nine other stages that will help you produce software efficiently, reliably, and with speed and agility.\n\n- Planning focuses on everything that happens before a single line of code is written.\n- Creating is about designing and developing.\n- Verifying checks the quality of the code.\n- Packaging applications and dependencies, managing containers, and building artifacts maintains a consistent software supply chain. \n- Release, or deployment, is all about moving code updates into production as iterations are ready.\n- Configuring is focused on creating, managing, and maintaining application environments.\n- Monitoring is about checking the status of software and networks.\n- Protecting is all about securing your applications and their environment.\n- Managing runs end-to-end through your software development lifecycle, controlling permissions and processes. \n\n## What it means to shift security left\n\nDid you notice that security wasn’t one of the lifecycle stages for DevOps? Well, it’s not a single stage because it’s woven into EVERY stage. Shift left means you don’t wait to incorporate security into software at the end of a build. You consider security beginning with the initial planning stage and continue to focus on it all the way through, giving you more opportunity to avoid or find and address any issues. 
Shifting left enables you to make sure the code you are developing functions as intended, and that any vulnerabilities and compliance issues are caught and fixed.\n\n## Understand CI/CD\n\nFirst off, CI/CD means continuous integration and continuous delivery. Combined continuous development methodologies and practices focus on catching vulnerabilities and errors early in the development lifecycle, ensuring that all the code deployed into production complies with standards the DevOps team has established for the software being created. This helps connect development and operations teams, as well as projects, by using automation for building, testing, and deployment. \n\nCI/CD is all about  incremental code changes being made frequently and reliably – a critical part of how a DevOps platform enables an organization to automatically deliver software multiple times a day. This is key for DevOps teams and the overall business because CI/CD helps to quickly and efficiently move software updates into production, making the organization able to respond faster to customer needs. \n\n## How to get started with DevOps: dig deeper\n\nWant to learn more? 
Our [Beginner's guide to DevOps](https://page.gitlab.com/resources-ebook-beginners-guide-devops.html) has everything you need to get started.\n",[9,109,749],{"slug":5505,"featured":6,"template":686},"if-its-time-to-learn-devops-heres-where-to-begin","content:en-us:blog:if-its-time-to-learn-devops-heres-where-to-begin.yml","If Its Time To Learn Devops Heres Where To Begin","en-us/blog/if-its-time-to-learn-devops-heres-where-to-begin.yml","en-us/blog/if-its-time-to-learn-devops-heres-where-to-begin",{"_path":5511,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5512,"content":5518,"config":5524,"_id":5526,"_type":14,"title":5527,"_source":16,"_file":5528,"_stem":5529,"_extension":19},"/en-us/blog/improve-cd-workflows-helm-chart-registry",{"title":5513,"description":5514,"ogTitle":5513,"ogDescription":5514,"noIndex":6,"ogImage":5515,"ogUrl":5516,"ogSiteName":670,"ogType":671,"canonicalUrls":5516,"schema":5517},"Get started with GitLab's Helm Package Registry","Improve CD workflows and speed up application deployment using our new Helm Package Registry.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668078/Blog/Hero%20Images/cover-image-helm-registry.jpg","https://about.gitlab.com/blog/improve-cd-workflows-helm-chart-registry","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get started with GitLab's Helm Package Registry\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Philip Welz\"}],\n        \"datePublished\": \"2021-10-18\",\n      }",{"title":5513,"description":5514,"authors":5519,"heroImage":5515,"date":5521,"body":5522,"category":769,"tags":5523},[5520],"Philip Welz","2021-10-18","\n\nIn our 14.1 release, we offered the ability to add Helm charts to the GitLab Package Registry. 
Here's everything you need to know to leverage application deployment with these new features.\n\n## The role of container images\n\nThe de-facto standard is to package applications into [OCI Images](https://github.com/opencontainers/image-spec) which are often just referred to as `container images` and more often as `Docker containers`. The [Open Container Initiative](https://opencontainers.org/) was launched in 2015 by Docker and other companies to define industry standards around container image formats and runtimes. GitLab introduced an OCI conform [Container Registry](/blog/gitlab-container-registry/) with the release of [GitLab 8.8](/releases/2016/05/22/gitlab-8-8-released/) in May 2016.\n\nToday, a common and widely adopted approach is to deploy applications with [Helm charts](https://helm.sh/) to [Kubernetes](https://kubernetes.io/). This will be covered in this blog together with the feature release in [GitLab 14.1](/releases/2021/07/22/gitlab-14-1-released/) of adding Helm Charts to the [GitLab Package Registry](https://docs.gitlab.com/ee/user/packages/package_registry/).\n\n### Install software to Kubernetes\n\nIn the DevOps era, [APIs](https://en.wikipedia.org/wiki/API) became incredibly popular, helping to drive demand for Kubernetes.\n\nThe core of Kubernetes' control plane is the API server. The API server exposes an HTTP REST API that lets end users, different parts of your cluster, and external components communicate with one another.\n\nTo interact with the API server we can use the command-line tool [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) - although it would be also possible to use software development kits (SDKs) or any client that understands REST like curl that was released 1997.\n\nBut which data format is best to use?\n\nModern APIs most likely use JSON. JSON is a human-readable format that provides provide access to machine-readable data. 
Here is an example for Kubernetes:\n\n```json\n{\n    \"kind\": \"Pod\",\n    \"apiVersion\": \"v1\",\n    \"metadata\": {\n        \"name\": \"nginx\",\n        \"creationTimestamp\": null,\n        \"labels\": {\n            \"run\": \"nginx\"\n        }\n    },\n    \"spec\": {\n        \"containers\": [\n            {\n                \"name\": \"nginx\",\n                \"image\": \"nginx\",\n                \"resources\": {}\n            }\n        ],\n        \"restartPolicy\": \"Always\",\n        \"dnsPolicy\": \"ClusterFirst\"\n    },\n    \"status\": {}\n}\n```\n\nOne downside of JSON is that comments are not supported. That is one several reasons why YAML stepped in and took the spot as the de-facto language to use for declarative configurations. The Kubernetes API transforms YAML to JSON behind the scenes. As you can easily convert back and forth between both, YAML tends to be more user-friendly. Nginx example Pod in YAML:\n\n```yaml\napiVersion: v1\nkind: Pod\nmetadata:\n  creationTimestamp: null\n  labels:\n    run: nginx\n  name: nginx\nspec:\n  Containers:\n  # NOTE: If no tag is specified latest will be used\n  - image: nginx\n    name: nginx\n    # TODO\n    resources: {}\n  dnsPolicy: ClusterFirst\n  restartPolicy: Always\nstatus: {}\n```\n\nNow you are ready to save our YAML code in a file called `nginx.yaml` and deploy it into Kubernetes:\n\n```shell\n$ kubectl apply --filename=nginx.yaml \n```\n\n### Create a Helm chart\n\nApplying YAML configuration files can get overwhelming, especially when needing to deploy into several environments or wanting to version the manifests. It is also cumbersome to maintain plain YAML files for more complex deployments which can easily extend to more than 1000 lines per file.\n\nInstead, how about using a format that packages our applications and makes them easily reproducible with templates? How about adding our own versioning scheme to this packaged application? 
How about deploying the same version with a few lines of code to multiple environments? This all comes with Helm.\n\nTo create a Helm package you have to ensure that the Helm CLI is [installed](https://helm.sh/docs/intro/install/) on your system (example with Homebrew on macOS: `brew install helm`).\n\n```shell\n$ helm create nginx \n```\n\nInspect the created Helm boilerplate files with `ls -lR` or `tree` on the CLI. This Helm chart can also be tested in a sandbox environment to verify it is operational.\n\n```shell\n.\n├── Chart.yaml\n├── charts\n├── templates\n│   ├── NOTES.txt\n│   ├── _helpers.tpl\n│   ├── deployment.yaml\n│   ├── hpa.yaml\n│   ├── ingress.yaml\n│   ├── service.yaml\n│   ├── serviceaccount.yaml\n│   └── tests\n│       └── test-connection.yaml\n└── values.yaml\n```\n\nNOTE: You can read more about the starter Chart [here](https://helm.sh/docs/chart_template_guide/getting_started/).\n\nKindly Helm creates a starter chart directory along with the common files and directories used in a chart with NGINX as an example. We again can install this into our Kubernetes cluster:\n\n```shell\n$ helm install nginx .\n```\n\n### Package Distribution\n\nThus far, we have learned that applications are packaged in containers and are installed using a Helm chart. Both methods require central distribution storage that is publicly accessible, or accessible in your local network environment where the Kubernetes clusters are running.\n\nThe Helm documentation provides insights on [running your own Helm registry](https://helm.sh/docs/topics/registries/), similar to hosting your own Docker container registry.\n\nWhat if we could avoid Do It Yourself DevOps and have both containers and Helm charts in one central DevOps platform? 
After maturing the [container registry in GitLab](https://docs.gitlab.com/ee/user/packages/container_registry/), community contributors helped add the [Helm chart registry](https://docs.gitlab.com/ee/user/packages/helm_repository/index.html) in 14.1.\n\nBuilding the container image and Helm chart is part of the CI/CD pipeline stages and jobs. The missing bit is the automated production deployment using Helm charts in your Kubernetes cluster.\n\nAn additional benefit in CI/CD is reusing the authentication mechanism, and working in the same trust environment with security jobs before actually uploading and publishing any containers and charts.\n\n### Build the Helm Chart\n\n```shell\n$ helm package nginx \n```\n\nThe command creates a new tar.gz archive ready to upload. Before doing so, you can inspect the archive with the `tar` command to verify its content.\n\n```shell\n$ tar ztf nginx-0.1.0.tgz\n\nnginx/Chart.yaml\nnginx/values.yaml\nnginx/templates/NOTES.txt\nnginx/templates/_helpers.tpl\nnginx/templates/deployment.yaml\nnginx/templates/hpa.yaml\nnginx/templates/ingress.yaml\nnginx/templates/service.yaml \nnginx/templates/serviceaccount.yaml\nnginx/templates/tests/test-connection.yaml\nnginx/.helmignore\n```\n\n### Push the Helm chart to the registry\n\nWith the [helm-push](https://github.com/chartmuseum/helm-push/#readme) plugin for Helm we can now upload the chart to the GitLab Helm Package Registry:\n\n```shell\n$ helm repo add --username \u003Cusername> --password \u003Cpersonal_access_token> \u003CREGISTRY_NAME> https://gitlab.com/api/v4/projects/\u003Cproject_id>/packages/helm/stable\n$ helm push nginx-0.1.0.tgz nginx\n```\n\nThis step should be automated for a production-ready deployment with a GitLab CI/CD job.\n\n```yaml\ndefault:\n  image: dtzar/helm-kubectl\n  before_script:\n    - 'helm repo add --username gitlab-ci-token --password ${CI_JOB_TOKEN} ${CI_PROJECT_NAME} ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/helm/stable'\nstages:\n  - 
upload\nupload:\n  stage: upload\n  script:\n    - 'helm plugin install https://github.com/chartmuseum/helm-push.git'\n    - 'helm push ./charts/podtatoserver-0.1.0.tgz ${CI_PROJECT_NAME}'\n```\n\n### Install the Helm chart\n\nFirst, add the Helm chart registry to your local CLI configuration and test the manual installation.\n\n```shell\n$ helm repo add --username \u003Cusername> --password \u003Cpersonal_access_token> \u003CREGISTRY_NAME> https://gitlab.com/api/v4/projects/\u003Cproject_id>/packages/helm/stable\n$ helm install --name nginx \u003CREGISTRY_NAME>/nginx\n```\n\nOnce it works, you can continue with adding an automated installation job into the CI/CD pipeline.\n\n```yaml\ndefault:\n  image: alpine/helm\n  before_script:\n    - 'helm repo add --username gitlab-ci-token --password ${CI_JOB_TOKEN} ${CI_PROJECT_NAME} ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/helm/stable'\nstages:\n  - install\nupload:\n  stage: install\n  script:\n    - 'helm repo update'\n    - 'helm install --name nginx ${CI_PROJECT_NAME}/nginx'\n```\n\n### Complete your DevOps lifecycle\n\nYou can learn more about the newest GitLab registries for Helm and Terraform in this [#EveryoneCanContribute cafe session](https://everyonecancontribute.com/post/2021-07-28-cafe-40-terraform-helm-gitlab-registry/) and inspect the [deployment repository](https://gitlab.com/everyonecancontribute/kubernetes/civo-k3s).\n\nTry the Helm chart registry and share your workflows. Are there any features missing to complete your DevOps lifecycle? 
Let us know [on Discord](https://discord.gg/qgQWhD6wWV).\n\nCover image by [Joseph Barrientos](https://unsplash.com/@jbcreate_) on [Unsplash](https://unsplash.com/photos/eUMEWE-7Ewg)\n{: .note}\n",[9,977,1477],{"slug":5525,"featured":6,"template":686},"improve-cd-workflows-helm-chart-registry","content:en-us:blog:improve-cd-workflows-helm-chart-registry.yml","Improve Cd Workflows Helm Chart Registry","en-us/blog/improve-cd-workflows-helm-chart-registry.yml","en-us/blog/improve-cd-workflows-helm-chart-registry",{"_path":5531,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5532,"content":5538,"config":5544,"_id":5546,"_type":14,"title":5547,"_source":16,"_file":5548,"_stem":5549,"_extension":19},"/en-us/blog/incident-management-with-aws-cloudwatch",{"title":5533,"description":5534,"ogTitle":5533,"ogDescription":5534,"noIndex":6,"ogImage":5535,"ogUrl":5536,"ogSiteName":670,"ogType":671,"canonicalUrls":5536,"schema":5537},"How to use GitLab's Incident Management with AWS CloudWatch","It's a straightforward process to set up GitLab Incident Management to work with AWS CloudWatch alarms. 
Here's what you need to know to get started.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664070/Blog/Hero%20Images/cloudwatch-gitlab-incident-management-bg.jpg","https://about.gitlab.com/blog/incident-management-with-aws-cloudwatch","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab's Incident Management with AWS CloudWatch\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean Arnold\"}],\n        \"datePublished\": \"2020-10-08\",\n      }",{"title":5533,"description":5534,"authors":5539,"heroImage":5535,"date":5541,"body":5542,"category":791,"tags":5543},[5540],"Sean Arnold","2020-10-08","\n\nAWS CloudWatch is a popular tool for users of Amazon Web Services to monitor and set alarms on their resources, including EC2 instances, RDS databases and many more.\n\nWhen alarms fire, it is important that your toolchain can quickly and effectively notify you and collate the relevant data. This enables your team to start determining the root cause and take action toward remediation.\n\nGitLab Incident Management now makes it easier than ever to do this. GitLab can take AWS CloudWatch alerts (aka [alarms](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html)), or alerts from any other monitoring and alerting tool you have, and seamlessly integrate them into your [DevOps lifecycle](/topics/devops/).\n\n\n\n## Getting your alerts from AWS CloudWatch to GitLab\n\nNote: For this post, we will assume you are familiar with setting up CloudWatch metrics and alarms within AWS. For more information on AWS Cloudwatch consult the [AWS documentation](https://aws.amazon.com/cloudwatch/).\n\n### Enable the endpoint\n\nWith our generic alert endpoint, GitLab can ingest alerts via REST from any alerting service you have. An alert can be as simple as providing a title or as complex as you need. 
We provide some defined attributes that you can use to refine your GitLab Incident Management experience, such as the severity of the alert, the service that is alerting, and `gitlab_environment_name` so that you can get an [insight into your alerts for an associated environment and deployment](https://docs.gitlab.com/ee/ci/environments/#environment-incident-management) for users on our Gold and Ultimate plans.\n\nThe first step is to enable your project's alert endpoint. Follow the instructions in the [docs](https://docs.gitlab.com/ee/operations/incident_management/integrations.html#setting-up-generic-alerts) to do this.\n\nNext, we need to ensure the data sent to GitLab is in the expected payload format.\n\n### Transform the payload\n\nOne approach to send CloudWatch alarm data to GitLab is to use AWS Lambda to call the GitLab REST endpoint. We can set this up by publishing the CloudWatch alarm to an [SNS](https://aws.amazon.com/sns/) endpoint, which is then consumed by AWS Lambda to mutate and forward the alert payload to GitLab.\n\n![AWS CloudWatch to GitLab Incident Management](https://about.gitlab.com/images/blogimages/cloudwatch-incident-management-flow.png)\n\nIf you want to get this up and running quickly, I’ve [provided an AWS SAM (Serverless Application Model) application](https://gitlab.com/gitlab-examples/ops/incident-setup/everyone/cloudwatch-sns-to-gitlab-alerts) which can setup the Lambda application with the environment variables ready for you to enter your GitLab endpoint URL in.\n\nWe know that managing the integration between two tools can be painful. 
In the future, we want to make this step as easy as possible: the step of transforming your payload into GitLab Alert format will soon be replaced by [custom endpoints for alerts](https://gitlab.com/groups/gitlab-org/-/epics/4390).\n\nNext, you can [setup your SNS Notification Topic](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/US_SetupSNS.html), and subscribe to the [SNS Topic with your Lambda function](https://docs.aws.amazon.com/sns/latest/dg/sns-lambda-as-subscriber.html).\n\n### Receive your alerts\n\nWhen your CloudWatch alarm next triggers, Lambda should then fire the alert off to GitLab. You should then see your alert in the [Alert list](https://docs.gitlab.com/ee/operations/incident_management/alerts.html).\n\n![AWS CloudWatch to GitLab Incident Management alert list](https://about.gitlab.com/images/blogimages/cloudwatch-gitlab-incident-management-list.png)\n\nYou can click on an alert to [see more details](https://docs.gitlab.com/ee/operations/incident_management/alerts.html), assign an alert to a user and change the status of the alert. If the alert is significant enough to raise an incident, you can do that by clicking the “Create Incident.”\n\nCreating an incident will give you the power to assign team members to it and collaborate on it just like you would a regular GitLab issue. 
The incident will have the payload of the alert included in the [Alert Details tab](https://docs.gitlab.com/ee/operations/incident_management/incidents.html#alert-details).\n",[9,231,1339],{"slug":5545,"featured":6,"template":686},"incident-management-with-aws-cloudwatch","content:en-us:blog:incident-management-with-aws-cloudwatch.yml","Incident Management With Aws Cloudwatch","en-us/blog/incident-management-with-aws-cloudwatch.yml","en-us/blog/incident-management-with-aws-cloudwatch",{"_path":5551,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5552,"content":5558,"config":5564,"_id":5566,"_type":14,"title":5567,"_source":16,"_file":5568,"_stem":5569,"_extension":19},"/en-us/blog/incident-management-with-gitlab",{"title":5553,"description":5554,"ogTitle":5553,"ogDescription":5554,"noIndex":6,"ogImage":5555,"ogUrl":5556,"ogSiteName":670,"ogType":671,"canonicalUrls":5556,"schema":5557},"Understand incident management with GitLab","GitLab Incident Management helps your response teams focus on the problem and shorten the mean time to repair rather than waste time on the process itself.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681208/Blog/Hero%20Images/incident-management-blog-cover.jpg","https://about.gitlab.com/blog/incident-management-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Understand incident management with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Waldner\"}],\n        \"datePublished\": \"2020-04-03\",\n      }",{"title":5553,"description":5554,"authors":5559,"heroImage":5555,"date":5561,"body":5562,"category":679,"tags":5563},[5560],"Sarah Waldner","2020-04-03","\n\nManaging incidents can be stressful! 
While you’re busy trying to restore service for your customers, you are also likely juggling several competing priorities: digging through multiple tools to understand the problem, communicating with stakeholders, and updating tickets in different systems. Did you know that you can use GitLab to help manage the chaos?\n\nGitLab Incident Management, which recently became a [viable category](/direction/maturity/), aims to decrease the overhead of managing an incident so response teams can spend more time actually resolving problems. We do this by enabling teams to quickly gather the resources in one central, aggregated view. We facilitate communication and enable teams to have dialogs that can be captured all in the same tool they already use to collaborate on development. Ultimately, GitLab Incident Management can help response teams to shorten [MTTR](https://en.wikipedia.org/wiki/Mean_time_to_repair).\n\n## Why Incident Management within GitLab?\n\nGitLab is a complete [DevOps platform](/topics/devops/), delivered as a [single application](/handbook/product/single-application/). As such, we believe there are additional benefits for DevOps users to manage incidents within GitLab.\n\n1. Co-location of code, CI/CD, monitoring tools, and incidents reduces context switching and enables GitLab to correlate what would be disparate events or processes within one single control pane.\n1. The same interface for collaboration for development and incident response streamlines the process. 
The developers who are on call can use the same interface that they already use every day; this prevents the incident responders from having to use a tool that they are unfamiliar with and thus hampering their ability to respond to the incident.\n\n## GitLab Incident Management Capabilities\n\nAvailable today, GitLab Incident Management includes the following highlighted capabilities:\n* [Incident issues](https://docs.gitlab.com/ee/operations/incident_management/index.html#configuring-incidents) as the one place to capture all data and information related to the incident.\n* [Integration with Slack](https://docs.gitlab.com/ee/user/project/integrations/gitlab_slack_application.html#gitlab-slack-application-free-only) to facilitate intuitive team communication\n* [Link Zoom calls to GitLab issues](https://docs.gitlab.com/ee/operations/incident_management/index.html#zoom-in-issues) to facilitate synchronous communication\n* [Embed GitLab-managed Kubernetes metrics](https://docs.gitlab.com/ee/operations/incident_management/index.html#gitlab-hosted-metrics) directly within the GitLab Incident Issue\n* [Embed generic Grafana metrics](https://docs.gitlab.com/ee/operations/incident_management/index.html#grafana-metrics) directly within the GitLab Incident Issue\n* [The GitLab alerts endpoint](https://docs.gitlab.com/ee/operations/incident_management/index.html#alert-endpoint) can accept alerts from any source via a generic webhook receiver\n* Prometheus Recovery alerts can [automatically close issues](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html#taking-action-on-incidents) that were created when you receive notification that the alert is resolved.\n\n## How to use GitLab Incident Management\n\nThere are numerous entry points to a potential incident. 
As an incident responder, once you are aware of an ongoing incident, you can manually create an incident issue by simply tagging the issue with the `incident` label.\n\nAlternatively, you can also configure GitLab to automatically create incidents based on alerts from your monitoring tool. When an alert is posted to the GitLab [Alerts endpoint](https://docs.gitlab.com/ee/operations/incident_management/integrations.html), GitLab can create incidents using an [issue template](https://docs.gitlab.com/ee/user/project/description_templates.html), populating important information useful to the incident response team.\n\nThe incident issue template can be customized using [quick actions](https://docs.gitlab.com/ee/user/project/description_templates.html) to label, mention team members, or assign to specific people automatically. Doing so will help create incidents that have a consistent baseline set of information to help jumpstart the incident response.\n\nAs more details for the ongoing incident emerge, you can directly embed GitLab-managed Kubernetes cluster metrics and application metrics in the incident. You can also embed other Grafana metrics in the incident if this is a critical tool for your team. Sharing up to date information in a central location will help facilitate understanding and enable the incident response team to move forward armed with the latest information. Having embedded charts can also enable more effective retrospectives by having relevant information within the same view.\n\nAs the firefight progresses, the incident response team is encouraged to add timeline events, updates, questions, and answers to the incident. These interactions help create an audit trail and enable shared understanding across the team.\n\nAt the end, an incident can be automatically closed once GitLab receives a recovery alert via the enabled Prometheus recovery alert integration. 
As the team reconvenes to determine actionable next steps, it  can leverage the completed incident ticket to find improvement areas instead of relying on a separate tool. Furthermore, a team can directly create and link action items to the incident issue in the form of related issues and merge requests to improve the resiliency of the system.\n\n## Next Steps\n\nGet started by visiting the [Incident Management documentation page](https://docs.gitlab.com/ee/operations/incident_management/index.html) and create an issue template. Adopt a new process or amend the existing process for incident management to take advantage of the capabilities within GitLab.\n\nIncident Management is a [focus area](/direction/maturity/#monitor) for GitLab in 2020. We plan to continue iterating and improving this category. We’d love your help in prioritizing work on the most valuable improvements to the incident management solution. Keep an eye on [Incident Management Issues](https://gitlab.com/groups/gitlab-org/-/epics/349) and upvote or share your experiences in relevant issues.\n\nTo report a bug or request a feature or enhancement, follow these steps:\n\n* Open an issue in the [GitLab project](https://gitlab.com/gitlab-org/gitlab/issues).\n* Describe the feature enhancement and, if possible, include examples.\n* Add these labels to the issue: Category:Incident Management, devops::monitor, group::health\n* Tag @abellucci on the issue.\n\nCover image by [Tine Ivanic](https://unsplash.com/photos/u2d0BPZFXOY) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,916],{"slug":5565,"featured":6,"template":686},"incident-management-with-gitlab","content:en-us:blog:incident-management-with-gitlab.yml","Incident Management With 
Gitlab","en-us/blog/incident-management-with-gitlab.yml","en-us/blog/incident-management-with-gitlab",{"_path":5571,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5572,"content":5577,"config":5583,"_id":5585,"_type":14,"title":5586,"_source":16,"_file":5587,"_stem":5588,"_extension":19},"/en-us/blog/industry-moving-to-single-application-for-devops",{"title":5573,"description":5574,"ogTitle":5573,"ogDescription":5574,"noIndex":6,"ogImage":1604,"ogUrl":5575,"ogSiteName":670,"ogType":671,"canonicalUrls":5575,"schema":5576},"The industry moves toward single DevOps lifecycle applications","Today's acquisition of ElectricCloud is further validation of a shift towards DevOps consolidation.","https://about.gitlab.com/blog/industry-moving-to-single-application-for-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The industry is moving towards a single application for the DevOps lifecycle\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2019-04-18\",\n      }",{"title":5578,"description":5574,"authors":5579,"heroImage":1604,"date":5580,"body":5581,"category":299,"tags":5582},"The industry is moving towards a single application for the DevOps lifecycle",[745],"2019-04-18","\n\nToday [CloudBees acquired ElectricCloud](https://www.cloudbees.com/press/cloudbees-acquires-market-leader-electric-cloud-creating-continuous-delivery-powerhouse) to strengthen their continuous delivery model.\nWith this acquisition we’re seeing the industry move in the direction that GitLab set forth.\nWe’ve seen Atlassian and GitHub follow our lead when we were the first to offer continuous\nintegration as part of code-hosting, and today with the CloudBees announcement we are seeing\na shift towards DevOps consolidation. Enterprises are demanding products that span a larger\npart of the DevOps lifecycle. 
This acquisition further validates GitLab's approach of a single,\nintegrated application for developers.\n\nGitLab focuses on the entire DevOps lifecycle. From the initial stages of your application creation\nprocess to the delivery of the software to market, GitLab helps you every step of the way.\nIn addition, instead of rearchitecting legacy software to be cloud native, GitLab was built to\nsupport cloud native deployments, helping enterprises embrace the cloud native way of developing software.\n\nWith GitLab you get a single application built from the ground up to provide a seamless experience\nacross every stage of the software development, delivery, and operations lifecycle.\nGitLab’s approach is to design components that fit, rather than taking separate tools that weren’t\ndesigned to work together and trying to integrate them. This approach ensures organizations\ncan increase cycle times and take advantage of more collaborative workflows.\n\n![GitLab's DevOps lifecycle](https://about.gitlab.com/images/blogimages/dev-ops-plan-to-monitor.png){: .medium.center}\n\nWatch how GitLab helps with everything from planning to monitoring here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/nMAgP4WIcno\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n*\u003Csmall>Note: This demo covers up to GitLab 11.3 – we are now on GitLab 11.9 and have shipped more features since this was recorded.\u003C/small>*\n\nWe invite you to get started with GitLab to see how we can help move your software development\nprocess between developer and operations teams. 
We value your feedback and look forward to\ncontinuing to lead forward the industry.\n",[9,726],{"slug":5584,"featured":6,"template":686},"industry-moving-to-single-application-for-devops","content:en-us:blog:industry-moving-to-single-application-for-devops.yml","Industry Moving To Single Application For Devops","en-us/blog/industry-moving-to-single-application-for-devops.yml","en-us/blog/industry-moving-to-single-application-for-devops",{"_path":5590,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5591,"content":5597,"config":5602,"_id":5604,"_type":14,"title":5605,"_source":16,"_file":5606,"_stem":5607,"_extension":19},"/en-us/blog/inside-dora-performers-score-in-gitlab-value-streams-dashboard",{"title":5592,"description":5593,"ogTitle":5592,"ogDescription":5593,"noIndex":6,"ogImage":5594,"ogUrl":5595,"ogSiteName":670,"ogType":671,"canonicalUrls":5595,"schema":5596},"Inside DORA Performers score in GitLab Value Streams Dashboard ","Learn how four key metrics drive DevOps maturity, helping teams optimize workflows and achieve DevOps excellence.\n","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098908/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_644947854_248JIrEOCaGJdfJdiSjYde_1750098907747.jpg","https://about.gitlab.com/blog/inside-dora-performers-score-in-gitlab-value-streams-dashboard","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside DORA Performers score in GitLab Value Streams Dashboard \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2024-01-18\",\n      }",{"title":5592,"description":5593,"authors":5598,"heroImage":5594,"date":5599,"body":5600,"category":769,"tags":5601},[4146],"2024-01-18","The DevOps Research and Assessment ([DORA](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html)) metrics are industry-standard measurements to help better understand the capabilities that 
drive software delivery and operations performance. GitLab recently added a DORA Performers score panel to the Value Streams Dashboard in the GitLab DevSecOps Platform to visualize the status of the organization's DevOps performance across different projects.\n\nThis new visualization displays a breakdown of the DORA performance levels, designating a score level for each project under a group. Executives can use this visualization to easily identify the highs and lows in DORA scores and understand their organization's DevOps health top to bottom.\n\n> [Try the Value Streams Dashboard today.](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/)\n\n## What are DORA metrics?\n\nDuring the past nine years, the DORA team gathered insights from over 36,000 professionals around the globe on how to measure the performance of a software development team. They identified four metrics as key indicators to measure software teams' development effectiveness and efficiency:\n\n- [Deployment frequency](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html#deployment-frequency) and [Lead time for changes](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html#lead-time-for-changes) measure team velocity.\n- [Change failure rate](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html#change-failure-rate) and [Time to restore service](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html#time-to-restore-service) measure stability.\n\nBy analyzing these metrics, teams are able to find areas for improvement, optimize their workflows, and ultimately drive positive business results.\n\nDORA uses these metrics to identify high-performing, medium-performing, and low-performing teams.  
These performance levels provide a framework for organizations to assess their DevOps maturity and effectiveness.\n\n![DORA performers](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098929/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098929143.png)\n\nHigh performance indicates that the team is operating at excellent speed and stability in their software delivery, reaching the peak of DevOps maturity.\n\nMedium and low performance levels suggest opportunities for improvement in different aspects of the software development and delivery process.\n\nLet's take a closer look at the DORA definition for each performance level.\n\n![Chart of performance metrics](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098929/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098929144.png)\n\u003Csup>\u003Csub>_Source: [DORA Accelerate State of DevOps report](https://cloud.google.com/blog/products/devops-sre/dora-2022-accelerate-state-of-devops-report-now-out)_\u003C/sub>\u003C/sup>\u003Cp>\u003C/p>\n\n## GitLab definitions for the DORA score performance levels\n\nDORA metrics are available out of the box in the GitLab DevSecOps platform. To enable the score calculation to operate \"out of the box\" with GitLab, we adjust the scoring rules so they work with the platform's unified data model. 
Read more in the [score definition documentation](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#dora-performers-score-panel).\n\nThe goal is for organizations to strive for high performance in these metrics, as a high score often correlates with better business outcomes, such as increased efficiency, faster time-to-market, and higher software quality.\n\n## DORA metrics in GitLab\n\nIn addition to the Value Streams dashboard, the DORA metrics are available also on the [CI/CD analytics charts](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html), which show the history of DORA metrics over time, and on [Insights reports](https://docs.gitlab.com/ee/user/project/insights/index.html#dora-query-parameters) where you can create custom charts.\n\nWatch our DORA overview video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n \u003Ciframe src=\"https://www.youtube.com/embed/jYQSH4EY6_U?si=sE9rf_X58BGD2uK9\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Get started today\nYou can get started with the Value Streams Dashboard by [following the instructions](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/) in this documentation. 
Then, to help us improve the value of the Value Streams Dashboard, please share feedback about your experience in this [brief survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_50guMGNU2HhLeT4).\n",[9,2243,479,1180,1040],{"slug":5603,"featured":6,"template":686},"inside-dora-performers-score-in-gitlab-value-streams-dashboard","content:en-us:blog:inside-dora-performers-score-in-gitlab-value-streams-dashboard.yml","Inside Dora Performers Score In Gitlab Value Streams Dashboard","en-us/blog/inside-dora-performers-score-in-gitlab-value-streams-dashboard.yml","en-us/blog/inside-dora-performers-score-in-gitlab-value-streams-dashboard",{"_path":5609,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5610,"content":5616,"config":5622,"_id":5624,"_type":14,"title":5625,"_source":16,"_file":5626,"_stem":5627,"_extension":19},"/en-us/blog/inside-gitlab-security-dashboards",{"title":5611,"description":5612,"ogTitle":5611,"ogDescription":5612,"noIndex":6,"ogImage":5613,"ogUrl":5614,"ogSiteName":670,"ogType":671,"canonicalUrls":5614,"schema":5615},"Security dashboards secure applications at DevOps speed","GitLab Security Dashboards enable security professionals to view vulnerabilities across a project. Here’s an inside look.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678710/Blog/Hero%20Images/inside-gitlab-security-dashboards.jpg","https://about.gitlab.com/blog/inside-gitlab-security-dashboards","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How can teams secure applications at DevOps speed? Security Dashboards are here to help.\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2018-09-14\",\n      }",{"title":5617,"description":5612,"authors":5618,"heroImage":5613,"date":5619,"body":5620,"category":791,"tags":5621},"How can teams secure applications at DevOps speed? 
Security Dashboards are here to help.",[1921],"2018-09-14","\nBusiness survival today depends on a radically faster DevOps lifecycle, but how can teams secure applications at DevOps speed? It’s a thorny problem for a number of reasons: applications are a prime target for cyber attacks; most [application security](/topics/devsecops/) tools are resource intensive, requiring integration of both technology and processes; and testers face the dilemma of when and how often to test code that is iteratively changed right up until it’s deployed. Many are faced with weighing the need to test each iteration against the speed and cost of doing so, while the possibility of a rollback looms in the case of an unforeseen security vulnerability.\n\n>Many are faced with weighing the need to test each iteration against the speed and cost of doing so\n\nWe know that shifting left and discovering vulnerabilities earlier in the development process is important, but it’s tough to find the perfect balance, where teams can be confident they’re truly creating business value and not becoming a business inhibitor. It’s clear that our existing application security tools are colliding with modern development. So what if you could scan all code, every time for development, using fewer tools instead of more, and have developers and operations on the same page instead of adversarial?\n\n### Built-in security products\n\nIt’s going to take a fundamental shift by companies towards proactive security. With security issues reported directly in merge requests, one license cost for integrated security, and zero context-switching to proactively secure applications, we believe GitLab can help get you there.\n\nUsing multiple tools forces developers to switch away from their primary objective of developing code, or requires integrated workflows with security pros. We believe successful tools will add high value while minimizing distraction for engineers. 
With GitLab, [SAST](https://docs.gitlab.com/ee/user/application_security/sast/), [DAST](https://docs.gitlab.com/ee/user/application_security/dast/), [container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/), [dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), and [license management](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html) are all built in. Because there’s one tool for the software development lifecycle, you can automatically run tests on all code commits, early in the development process.\n\n### Security Dashboard demo\nIn 11.1, [we shipped Security Dashboards](/releases/2018/07/22/gitlab-11-1-released/), to help serve security professionals. Traditionally we’ve focused on the developer, but the Security Dashboard is meant to enable security professionals to view vulnerabilities across a project. Here’s a quick look at our first iteration of the Security Dashboard:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/U2_dqwTRUVk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nKeep an eye out for [improvements](https://gitlab.com/gitlab-org/gitlab-ee/issues/6709), and let us know what you think by tweeting us [@gitlab](https://twitter.com/gitlab)!\n\nCover photo by [Christian EM](https://unsplash.com/photos/J7EUjSlNQtg) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,916,728,875],{"slug":5623,"featured":6,"template":686},"inside-gitlab-security-dashboards","content:en-us:blog:inside-gitlab-security-dashboards.yml","Inside Gitlab Security 
Dashboards","en-us/blog/inside-gitlab-security-dashboards.yml","en-us/blog/inside-gitlab-security-dashboards",{"_path":5629,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5630,"content":5636,"config":5642,"_id":5643,"_type":14,"title":5644,"_source":16,"_file":5645,"_stem":5646,"_extension":19},"/en-us/blog/insights",{"title":5631,"description":5632,"ogTitle":5631,"ogDescription":5632,"noIndex":6,"ogImage":5633,"ogUrl":5634,"ogSiteName":670,"ogType":671,"canonicalUrls":5634,"schema":5635},"GitLab: New Tool to Visualize High-Level Project Trends","How our easy to configure Insights technology takes data from issues and merge requests to build visually appealing charts.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681053/Blog/Hero%20Images/birdseyeview.jpg","https://about.gitlab.com/blog/insights","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're dogfooding a tool to help visualize high-level trends in GitLab projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-01-30\",\n      }",{"title":5637,"description":5632,"authors":5638,"heroImage":5633,"date":5639,"body":5640,"category":791,"tags":5641},"We're dogfooding a tool to help visualize high-level trends in GitLab projects",[2002],"2020-01-30","\n\nOur policy at GitLab is to [dogfood everything](/handbook/engineering/development/principles/#dogfooding) – meaning we aren't going to introduce a new product or feature to our [DevOps platform](/solutions/devops-platform/) before our engineering team tests it out. 
Sometimes though, the development process happens in reverse: The product and engineering teams need a specific tool or functionality to help us run GitLab better and discover a tool that has the capacity to solve many different customer use cases.\n\n[Insights](https://docs.gitlab.com/ee/user/project/insights/), which is available to [GitLab Ultimate](/pricing/ultimate/) users, is an example of such a tool. Insights is a flexible feature of GitLab that allows our users to visualize different trends in workflows, bugs, merge request (MR) throughput, and issue activity that is based upon the underlying labeling system of a group. In this blog post, we'll go in-depth on how and why we built this tool, how we use the tool at GitLab, and explain how to configure Insights for your own projects.\n\n\n- [Why we built Insights](#why-we-built-insights)\n- [Labels powers Insights](#why-label-hygiene-matters)\n- [How to configure Insights](#configuring-your-insights-dashboard)\n- [How GitLab uses Insights](#how-we-are-dogfooding-insights)\n- [Implementing Insights in your instance](#implementing-insights-for-your-team)\n\n[Kyle Wiebers](/company/team/#kwiebers), quality engineering manager on Engineering Productivity, gives an overview of how we use Insights at GitLab in the GitLab Unfiltered video embedded below. 
Watch the video and read the rest of the post to learn all about this exciting new tool we're dogfooding at GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/kKnQzS9qorc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Why we built Insights\n\nThe [Engineering Productivity team](/handbook/engineering/quality/#engineering-productivity) at GitLab first built Insights to provide an overview of trends in the issue tracker, but soon realized that this technology can be applied in different ways that were beneficial to our needs, and the needs of our users.\n\n\"The initial thing was we were interested in when the bugs were being raised: Were they being raised around release time or were they being raised the middle of a phase?\" says [Mark Fletcher](/company/team/#markglenfletcher), backend engineer on Engineering Productivity. \"Because we did have bugs being created just after release, which led to regressions, which led to patch fixes. So we were just interested in exploring those kinds of trends.\"\n\nTo capture this trend data the Quality Engineering team created the [quality dashboard](https://quality-dashboard.gitlap.com/groups/gitlab-org), which was essentially the first iteration of Insights for GitLab. While the quality dashboard showed trends in bugs being raised per release cycle, it also showed how much work was being accomplished over the same period.\n\n\"And that's where the scope really changed from looking at issues that are bugs to merge requests and being able to have generic rules based on labels that we can use to align with our workflow,\" says Kyle.\n\n## Why label hygiene matters\n\nThe Engineering Productivity team soon realized that a lot of the different trends they were aiming to capture with Insights were powered by [labels](https://docs.gitlab.com/ee/user/project/labels.html#overview). 
Labels allow a GitLab user to categorize epics, issues, and merge requests with descriptive titles such as \"bug\" or \"feature request\" and quickly filter based upon category. The label filtering system works inside the [issue tracker](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&first_page_size=100), and all throughout GitLab, and is a core part of the underlying configuration of Insights.\n\nA good example of an Insights dashboard that is configured by labels and the metadata that underlies issues and merge requests (such as creation date) is the [MR throughputs dashboard](https://gitlab.com/groups/gitlab-org/-/insights/#/throughputs).\n\n![Merge request throughputs for group](https://about.gitlab.com/images/blogimages/merge_throughputs_group.png){: .shadow.medium.center}\nA screenshot of the chart for merge request throughouts at the group level.\n{: .note.text-center}\n\nThe MR throughputs dashboard captures how many MRs are completed during a given week or month to measure our organization's overall performance. It is part of our workflow to assign labels to MRs that help distinguish the type of MR being worked on: feature, bug, community contribution, security, or backstage. This dashboard is configured as a stacked bar chart, which makes it easy to visualize MR throughput by type so we can see the type of work being created over a fixed period of time. The chart is also divided into weekly or monthly views, which helps us see both short- and long-term trends.\n\n\"So, we can look at short-term trends and longer-term trends to see: Are we delivering more work? Are we hitting a bottleneck? Are we plateauing? And that allows us to dive a little bit deeper and take corrective action,\" says Kyle.\n\n### Labels help simplify the configuration of dashboards\n\nIf you look to the lefthand sidebar of the MR throughputs dashboard, you'll notice that the dashboard is configured at the Gitlab-org group level. 
The group level of GitLab-org contains all of the projects within GitLab-org and therefore captures all of the MR throughput data across all projects.\n\nThe project level is a level below the group level and looks at a specific project contained within a larger group, such as the GitLab project in the GitLab-org group.\n\n![Merge request throughputs for project](https://about.gitlab.com/images/blogimages/mr_throughputs_product.png){: .shadow.medium.center}\nA screenshot of the chart for merge request throughoutputs at the project level.\n{: .note.text-center}\n\nAny Insights dashboard, including the MR throughputs dashboard, can be filtered at the group level or the project level, but the configuration remains the same regardless of how the dashboard is filtered.\n\n\"So everything that's contained within a group, and in our case, it would be the GitLab-org group, you can also have this on a project level,\" says Kyle. \"So if you want to look at Insights on a project, you can configure the same thing on a project. Just for our use case, it made sense to look at MR throughputs across multiple projects versus one specific project.\"\n\nBut in the end, it all comes back to labels. We don't have to configure the Insights dashboard differently for groups and projects because all of our labels at GitLab are set up at the group level and then propagate down to the project level.\n\nOne of the characteristics of Insights that makes it such a valuable feature is that the configuration is so flexible. While most customers will use the same labeling system across groups and projects as GitLab does, it is possible to configure the charts separately at the project and group level.\n\n\"The scope [of Insights] changed from looking at issues that are bugs to merge requests and being able to have generic rules based on labels that we can use to align with our workflow,\" says Kyle. 
\"Then that flexibility allows any customers to leverage the same feature based on their own specific workflow or labeling practices.\"\n\nA user can use Insights on a group or project regardless of the underlying labeling system. They just need to configure the dashboard according to their workflow.\n\n## Configuring your Insights dashboard\n\nThere are numerous Insights dashboards that are available out of the box or that can be [easily configured](https://docs.gitlab.com/ee/user/project/insights/#configure-your-insights) based on a user's labeling workflow.\n\nAll of the Insights dashboards within GitLab are [driven by a YAML file](https://gitlab.com/gitlab-org/quality/insights-config/-/blob/master/.gitlab/insights.yml). The configuration for each chart includes configuration parameters: title, type, and query.\n\nThe query section defines the type of issues and/or merge requests from the issue tracker that will be included in the chart. The [parameters for which labels are contained in the chart](https://docs.gitlab.com/ee/user/project/insights/#queryfilter_labels) fall under the query section as well.\n\n\"The Insights configuration is actually stored in [one of your project's repositories]. So, it can be changed just like you do any of your code. It can be [version-controlled](/topics/version-control/) so you can see changes over time. 
That gives you a lot of value to just ensure that there's very clear traceability into why was this dashboard changed, and when was it changed,\" says Kyle.\n\nHere is the configuration that underlies the [MR throughputs dashboard](https://gitlab.com/groups/gitlab-org/-/insights/#/throughputs) we looked at extensively in the section above.\n\n```\nthroughputs:\n  title: Merge Request Throughputs (product only projects)\n  charts:\n    - title: Throughputs per Week\n      type: stacked-bar\n      query:\n        issuable_type: merge_request\n        issuable_state: merged\n        collection_labels:\n          - Community contribution\n          - security\n          - bug\n          - feature\n          - backstage\n        group_by: week\n        period_limit: 12\n    - title: Throughputs per Month\n      type: stacked-bar\n      query:\n        issuable_type: merge_request\n        issuable_state: merged\n        collection_labels:\n          - Community contribution\n          - security\n          - bug\n          - feature\n          - backstage\n        group_by: month\n        period_limit: 24\n```\n{: .language-ruby}\n\nExplore the [Insights YAML file for GitLab](https://gitlab.com/gitlab-org/gitlab-insights/blob/master/.gitlab/insights.yml) to see how we set up some of our other charts.\n\n## How we are dogfooding Insights\n\nInsights is most effective at monitoring high-level trends, as well as measuring performance against a specific measurable objective with the aim of taking corrective action. At GitLab, we've been using our Insights technology in different ways to visualize our overall performance or to answer specific questions.\n\nOur Support and Quality Engineering teams at GitLab currently use Insights, but in different ways. 
By dogfooding the technology here at GitLab, we've found numerous use cases for Insights that could be valuable to our customers.\n\n### How our Support team uses Insights\n\nThe Support team uses Insights both as an out of the box issue tracking dashboard and as a customized dashboard made possible using automation.\n\n#### Bugs SLO chart\n\nThe [Bugs SLO dashboard](https://gitlab.com/gitlab-org/gitlab/insights/#/bugsPastSLO) was created so the Support department and engineering leaders can identify bugs overdue from SLO.\n\n![Support team Bugs SLO chart](https://about.gitlab.com/images/blogimages/bugs_slo.png){: .shadow.medium.center}\nA chart specially configured for the Support team to show how many bugs missed the SLO each month.\n{: .note.text-center}\n\nThe Bugs SLO chart is configured in the GitLab-org group but lives in the GitLab project. The chart pulls open issues pertaining to bugs and customer bugs, that are labeled `missed-SLO` and groups them by month. We also have a [labeling system for categorizing based on priority](https://docs.gitlab.com/ee/development/labels/index.html#priority-labels) – P1 bugs are top priority, P2 bugs are second priority.\n\n\"This really allows us to, again, look at the trends: Are we improving? Are we getting worse? 
Do we need to look a little bit deeper here and do a corrective action to help address any problems that we see within the trends that Insights provides?\" says Kyle.\n\n#### Configuration of SLO chart\n\nHere is a peek at what happens inside the YAML file to configure the bugs SLO chart.\n\n```\nbugsPastSLO:\n  title: Bugs Past SLO\n  charts:\n    - title: Open bugs past priority SLO by creation month\n      type: stacked-bar\n      query:\n        issuable_type: issue\n        issuable_state: opened\n        filter_labels:\n          - bug\n          - missed-SLO\n        collection_labels:\n          - P1\n          - P2\n        group_by: month\n        period_limit: 24\n    - title: Open customer bugs past priority SLO by creation month\n      type: stacked-bar\n      query:\n        issuable_type: issue\n        issuable_state: opened\n        filter_labels:\n          - bug\n          - missed-SLO\n          - customer\n        collection_labels:\n          - P1\n          - P2\n        group_by: month\n        period_limit: 24\n```\n{: .language-ruby}\n\n#### Triage helps ensure good label hygiene\n\nFor the Bugs SLO chart, we use the [GitLab triage project](https://gitlab.com/gitlab-org/gitlab-triage) to [automatically apply the `missed-SLO` label to open issues with priority labels that miss the SLO target](/handbook/engineering/quality/triage-operations/#missed-slo). We use automation here because the GitLab project is so massive, it would not be feasible to manually apply this label based upon the missed SLO target rules. 
Insights is flexible enough that either manual labeling or automation can be used on any dashboard.\n\n### Support issue tracker\n\nThe Support team used one of our out of the box dashboards to [see how many Support issues are open and closed per month](https://gitlab.com/gitlab-com/support-forum/insights/#/issues) with the [GitLab.com Support Tracker project](https://gitlab.com/gitlab-com/support-forum), which looks at support issues raised by GitLab.com users that don't go through the Support team.\n\n![Support issue tracker](https://about.gitlab.com/images/blogimages/support_issue_tracker.png){: .shadow.medium.center}\nThe Support team also uses one of our out of the box dashboards that tracks the number of issues open and closed each month.\n{: .note.text-center}\n\n\"This shows that [the dashboard] is quite useful out of the box to just see some visualizations without doing any configuration,\" says Mark. \"These were the charts that we thought would give the most value to a team or to a project without doing any config whatsoever.\"\n\n## How our Quality Engineering team uses Insights\n\nThe Quality Engineering team uses Insights to look at opportunities to remedy gaps in a specific project in our EE, as well as to visualize flaky tests on GitLab based on reported issues.\n\n### Enterprise Edition testcases chart\n\nOne of our more specific use cases is the Enterprise testcases chart. The Quality Engineering department is working to close the gap in testcases in the GitLab Enterprise. 
The team [configured a chart](https://gitlab.com/gitlab-org/quality/testcases/insights/#/eeTestcasesCharts) within the [testcases project](https://gitlab.com/gitlab-org/quality/testcases/tree/master) to help visualize how many open and closed test gaps there are, separated by GitLab product area, and GitLab product tier.\n\n![EE testcases chart](https://about.gitlab.com/images/blogimages/EE_testcases.png){: .shadow.medium.center}\nQuality Engineering configured this chart to visualize gaps in testcases on GitLab Enterprise.\n{: .note.text-center}\n\n\"Looking at this chart, we may say, ‘Maybe we should have a few people focus on the gaps in verify because it has the most open testcases at the current point',\" says Kyle.\n\n#### Configuration of EE testcases chart\n\nThe EE testcases chart is not something that is available out of the box, but the [configuration for the chart](https://gitlab.com/gitlab-org/quality/testcases/blob/master/.gitlab/insights.yml) is pretty simple nonetheless.\n\n```\neeTestcasesCharts:\n  title: 'Charts for EE Testcases'\n  charts:\n    - title: Open testcases (backlog) by stage\n      type: bar\n      query:\n        issuable_type: issue\n        issuable_state: opened\n        filter_labels:\n          - \"Quality:EE test gaps\"\n        collection_labels:\n          - \"devops::configure\"\n          - \"devops::create\"\n          - \"devops::protect\"\n          - \"devops::enablement\"\n          - \"devops::growth\"\n          - \"devops::manage\"\n          - \"devops::monitor\"\n          - \"devops::package\"\n          - \"devops::plan\"\n          - \"devops::release\"\n          - \"devops::secure\"\n          - \"devops::verify\"\n```\n{: .language-ruby}\n\nThe configuration shows that this is a bar chart that is looking at open issues with the filter `Quality:EE test gaps`. The collection labels are what broke the bars out into different columns. 
While it is possible to illustrate the data in very intricate ways, the underlying schema to configure the chart is actually quite simple, mirroring the process of searching the issue tracker by filtering based on labels.\n\n![Issue tracker](https://about.gitlab.com/images/blogimages/issue_tracker_EE.png){: .shadow.medium.center}\nThe issues represented in the EE testcases chart can be searched for by label using the issue tracker in the testcases project.\n{: .note.text-center}\n\nOpening the issue tracker for the testcases project, you can search by `Quality:EE test gaps` label, select open issues, to see the actual issues represented by the Insights chart.\n\nThe key takeaway: If your team has good label hygiene and a logical workflow, building charts based on Insights should not be particularly challenging.\n\n### End-to-end transient failures\n\nThe Quality Engineering team monitors how often we have reports of flaky tests in our pipeline by looking at the number of issues created that fit the label schema.\n\n![End-to-end transient failure chart](https://about.gitlab.com/images/blogimages/end_to_end_chart.png){: .shadow.medium.center}\nA second chart configured for Quality Engineering is the end-to-end transient failure chart, which looks at flaky tests.\n{: .note.text-center}\n\nSimilar to many of our other charts, this is a stacked bar graph that looks at both open and closed issues on a weekly basis, and the underlying configuration is as you might expect.\n\n```\ntransientFailures:\n  title: End to end transient failures\n  charts:\n    - title: Opened transient failures per week\n      type: stacked-bar\n      query:\n        issuable_type: issue\n        issuable_state: opened\n        filter_labels:\n          - \"Quality\"\n          - \"QA\"\n          - \"bug\"\n        collection_labels:\n          - \"found:gitlab.com\"\n          - \"found:canary.gitlab.com\"\n          - \"found:staging.gitlab.com\"\n          - \"found:staging-orchestrated\"\n  
        - \"found:dev.gitlab.com\"\n          - \"found:nightly\"\n          - \"found:in MR\"\n        group_by: week\n        period_limit: 24\n    - title: Closed transient failures per week\n      type: stacked-bar\n      query:\n        issuable_type: issue\n        issuable_state: closed\n        filter_labels:\n          - \"Quality\"\n          - \"QA\"\n          - \"bug\"\n        collection_labels:\n          - \"found:gitlab.com\"\n          - \"found:canary.gitlab.com\"\n          - \"found:staging.gitlab.com\"\n          - \"found:staging-orchestrated\"\n          - \"found:dev.gitlab.com\"\n          - \"found:nightly\"\n          - \"found:in MR\"\n        group_by: week\n        period_limit: 24\n```\n{: .language-ruby}\n\n## Implementing Insights for your team\n\nIf your team is often pulling data from GitLab through an API or CSV export, and then building charts based on issues and merge request data, then Insights will make your life a lot easier!\n\nSome questions to think about before implementing Insights include: How would you want to categorize the work being done and the issues that are being created? How do you want to monitor the open/close rates on your issues? Also, how do you plan on using labels?\n\nInsights users really need to define their workflows and have a clear idea about how they're using labels. We recommend having some sort of [automated mechanism to ensure good label hygiene](/handbook/engineering/quality/triage-operations/#triage-automation). 
[GitLab Triage](https://gitlab.com/gitlab-org/gitlab-triage) is our open source project that automates labeling of issues on our giant GitLab project and is a good candidate for any organization that has a large backlog of issues.\n\nWe recommend users [read up more on the issues workflow](https://docs.gitlab.com/ee/development/contributing/issue_workflow.html) to learn more about how to use labels and the issue tracker, which is valuable background knowledge to improve your use of Insights.\n\nWe've been dogfooding Insights for a time to help iron out any wrinkles in the implementation or application of this technology, but we also want to hear your ideas of how we can make improvements to Insights. [Create an issue in the GitLab project issue tracker](https://gitlab.com/gitlab-org/gitlab/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=insights) with the Insights label to share your feedback with us!\n\nCover photo by [Aaron Burden](https://unsplash.com/@aaronburden) on [Unsplash](https://unsplash.com/photos/Qy-CBKUg_X8).\n{: .note.text-center}\n",[916,9,728],{"slug":679,"featured":6,"template":686},"content:en-us:blog:insights.yml","Insights","en-us/blog/insights.yml","en-us/blog/insights",{"_path":5648,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5649,"content":5654,"config":5659,"_id":5661,"_type":14,"title":5662,"_source":16,"_file":5663,"_stem":5664,"_extension":19},"/en-us/blog/integrating-gitlab-com-with-atlassian-jira-cloud",{"title":5650,"description":5651,"ogTitle":5650,"ogDescription":5651,"noIndex":6,"ogImage":3885,"ogUrl":5652,"ogSiteName":670,"ogType":671,"canonicalUrls":5652,"schema":5653},"How to integrate GitLab.com with Jira Cloud","Check out how to use the GitLab App on the Atlassian Marketplace to connect your merge requests, branches, and commits to a Jira issue.","https://about.gitlab.com/blog/integrating-gitlab-com-with-atlassian-jira-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n       
 \"@type\": \"Article\",\n        \"headline\": \"How to integrate GitLab.com with Jira Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tye Davis\"}],\n        \"datePublished\": \"2021-03-25\",\n      }",{"title":5650,"description":5651,"authors":5655,"heroImage":3885,"date":5656,"body":5657,"category":791,"tags":5658},[3388],"2021-03-25","By moving to the cloud engineering teams can accelerate innovation and scale resources across an organization. The ease of access and reduced infrastructure costs that comes with moving to the cloud is a direct result of using a platform that easily integrates your data and keeps it secure yet accessible. Gitlab.com, the cloud (SAAS) platform for GitLab, modernizes data platforms to leverage new applications and advances end-to-end software delivery. GitLab partners with other best-in-class cloud companies so your teams can use tools that best align with your team's DevOps ecosystem. Application development requires speed and iteration, making seamless collaboration a necessity to deliver real business value. GitLab embraces connecting all phases of the software development lifecycle (SDLC) in a DevOps ecosystem that fuels visibility, collaboration, and velocity.\n\n## How to use GitLab with Atlassian's Jira\n\nWe know that many companies have been using Jira for project management, and have existing data and business processes built into their instance. For some of these customers, this means it can be difficult and cost-prohibitive to move off of Jira. We believe that people (and tools) work better when they're all in one place, so to serve these customers, we built a seamless integration between GitLab and Jira. 
By using the [GitLab for Jira app in the Atlassian Marketplace](https://marketplace.atlassian.com/apps/1221011/gitlab-com-for-jira-cloud), you can integrate GitLab.com and Jira Cloud harmoniously.\n\nHere's a short list of what you can do when integrating GitLab with Jira:\n\n* One GitLab project integrates with all the Jira projects in a single Jira instance.\n* Quickly navigate to Jira issues from GitLab.\n* Detect and link to Jira issues from GitLab commits and merge requests.\n* Log GitLab events in the associated Jira issue.\n* Automatically close (also called \"transition\") Jira issues with GitLab commits and merge requests.\n\n## How to configure the integration\n\nThere are two methods for configuring the integration. The [Jira DVCS connector](https://docs.gitlab.com/ee/integration/jira/dvcs/), and the method we describe in this blog post. The DVCS connector updates data only once per hour, while our method syncs data in real time. We recommend using our method for this reason, but if you are not using both of these environments then use the Jira DVCS connector instead.\n\n- First, go to Jira Settings > Apps > Find new apps, then search for GitLab.\n- Next, click GitLab for Jira, then click \"Get it now\". Or, go the [App in the marketplace](https://marketplace.atlassian.com/apps/1221011/gitlab-for-jira), directly.\n\n![Arrow pointing to \"get it now button\" on GitLab on Atlassian Marketplace App](https://about.gitlab.com/images/blogimages/atlassianjira/gitlabonatlassianmarketplace.png){: .shadow.medium.center}\nClick the yellow button to download the app.\n{: .note.text-center}\n\n- Third, after installing, click \"Get started to go to the configurations\" page. 
This page is always available under Jira Settings > Apps > Manage apps.\n\n![GitLab on Atlassian Marketplace App](https://about.gitlab.com/images/blogimages/atlassianjira/manageappsjira.png){: .shadow.medium.center}\nClick the \"Get started button\".\n{: .note.text-center}\n\n- Fourth, in Namespace, enter the group or personal namespace, and then click \"Link namespace to Jira\". The user that is setting up GitLab for Jira must have Maintainer access to the GitLab namespace. Note: The GitLab user only needs access when adding a new namespace. For syncing with Jira, we do not depend on the user’s token.\n\n![GitLab for Jira Configuration](https://about.gitlab.com/images/blogimages/atlassianjira/gitlabforjiraintegration.png){: .shadow.medium.center}\nAdd a namespace.\n{: .note.text-center}\n\nAfter a namespace is added, all of the future commits, branches, and merge requests within all projects under that namespace will be synced to Jira. At the moment, past data cannot be synced.\n\nFor more information, see [the documentation](https://docs.gitlab.com/ee/integration/jira/index.html#usage).\n\n### How to troubleshoot GitLab for Jira\n\nThe GitLab for Jira App uses an iframe to add namespaces on the settings page. 
Some browsers block cross-site cookies which can lead to a message saying that the user needs to log on to GitLab.com even though the user is already logged in: \"You need to sign in or sign up before continuing.\"\n\nIn this situation, we recommend using [Firefox](https://www.mozilla.org/en-US/firefox/), [Google Chrome](https://www.google.com/chrome/index.html) or enabling cross-site cookies in your browser.\n\n### What are the limitations of GitLab for Jira?\n\nThis integration is currently not supported on GitLab instances under a [relative URL](https://docs.gitlab.com/omnibus/settings/configuration.html#configuring-a-relative-url-for-gitlab) (for example, http://yourcompanyname.com/gitlab).\n\n## How to use GitLab for Jira\n\nAfter the integrating GitLab and Jira, you can:\n\n- Refer to any Jira issue by its ID in GitLab branch names, commit messages, and merge request titles.\n\n- Using commit messages in GitLab, you can move Jira issues along that Jira projects defined transitions.\n\n![GitLab for Jira Setup](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot.png){: .shadow.medium.center}\nIn this image, you can see that this Jira issue has four stages: Backlog, selected for development, in progress, and done.\n{: .note.text-center}\n\n- As referenced in the base GitLab-Jira integration, when you reference an issue in a comment on a merge request and commit, e.g., PROJECT-7, the basic integration adds a comment in Jira issue. Also, by commenting in a Jira transition (putting a # first), this will move a Jira issue to the desired transition. 
Below is an example using the built-in GitLab Web IDE (this can be done in your Web IDE of choice as well).\n\n![View of Jira Transitions](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot2.png){: .shadow.medium.center}\nThere are multiple Jira transition options.\n{: .note.text-center}\n\n- Now, the user can see linked branches, commits, and merge requests in Jira issues (merge requests are called \"pull requests\" in Jira issues).\nJira issue IDs must be formatted in UPPERCASE for the integration to work.\n\n![View branches, commits and merge requests in your jira issue](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot4.png){: .shadow.medium.center}\nView branches, commits, and merge requests in your Jira issue.\n{: .note.text-center}\n\n- Click the links to see your GitLab repository data.\n\n![Deep Dive into your GitLab commits](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot5.png){: .shadow.medium.center}\nHow to take a look at your GitLab commits.\n{: .note.text-center}\n\n![Deep Dive into your GitLab branches](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot6.png){: .shadow.medium.center}\nTake a deep Dive into your GitLab merge requests.\n{: .note.text-center}\n\nFor more information on using Jira Smart Commits to track time against an issue, specify an issue transition, or add a custom comment, see the Atlassian page using [Smart Commits](https://support.atlassian.com/jira-cloud-administration/docs/enable-smart-commits/).\n\n## Watch and learn\n\nMore of a video person? 
For a walkthrough of the integration with GitLab for Jira, watch and learn how to configure GitLab Jira Integration using Marketplace App.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/SwR-g1s1zTo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nGitLab helps teams ship software faster with technology integration options, such as the integration with Jira, that automate tasks, provide visibility into development progress and the greater end-to-end software lifecycle. We recognize that many companies use Jira for Agile project management and our seamless integration brings Jira together with GitLab.\n\nCover image by [Mikołaj Idziak](https://unsplash.com/@mikidz) on [Unsplash](https://unsplash.com/photos/nwjRmbXbLgw).\n{: .note.text-left}\n",[1041,9,977],{"slug":5660,"featured":6,"template":686},"integrating-gitlab-com-with-atlassian-jira-cloud","content:en-us:blog:integrating-gitlab-com-with-atlassian-jira-cloud.yml","Integrating Gitlab Com With Atlassian Jira Cloud","en-us/blog/integrating-gitlab-com-with-atlassian-jira-cloud.yml","en-us/blog/integrating-gitlab-com-with-atlassian-jira-cloud",{"_path":5666,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5667,"content":5673,"config":5678,"_id":5680,"_type":14,"title":5681,"_source":16,"_file":5682,"_stem":5683,"_extension":19},"/en-us/blog/integrating-with-gitlab-secure",{"title":5668,"description":5669,"ogTitle":5668,"ogDescription":5669,"noIndex":6,"ogImage":5670,"ogUrl":5671,"ogSiteName":670,"ogType":671,"canonicalUrls":5671,"schema":5672},"How open source contributions accelerate GitLab Secure","Community contributions and an open integration framework allows anyone to extend GitLab 
Secure","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668622/Blog/Hero%20Images/group-rowing-collaboration.jpg","https://about.gitlab.com/blog/integrating-with-gitlab-secure","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How open source contributions accelerate GitLab Secure\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2020-10-22\",\n      }",{"title":5668,"description":5669,"authors":5674,"heroImage":5670,"date":5675,"body":5676,"category":1318,"tags":5677},[2862],"2020-10-22","\nWhen you think about security you probably imagine locks, gates, and closed systems. This is the more traditional approach to security but modern security is much more open and collaborative. If you want to build the most secure systems, there is nothing better than building those systems in the open. Open security practices allow you to get fast feedback from a broad audience with diverse perspectives, helping you build better more holistic solutions. That's our approach to building [GitLab Secure](/stages-devops-lifecycle/secure/) at GitLab. We're leveraging amazing open source security projects, the collective contribution of the wider community, and providing an open integration system for anyone to build on top of GitLab security scanners.\n\n## Shifting left\n\nTraditional security approaches are opaque and late in the development life cycle. Security scans are performed by isolated security experts long after developers write code, often after it's deployed to production. GitLab aims to make security an integrated and continuous process. That's why we've built [GitLab Secure directly integrated into the DevOps life cycle](/solutions/security-compliance/). 
We are taking security tools and \"shifting left\" to make these tools more accessible to developers earlier in the development life cycle and integrated directly into developers' workflows.\n\n![Traditional Security vs DevSecOps with GitLab](https://about.gitlab.com/images/blogimages/traditional-security-vs-integrated.png)\n\nWe created a detailed survey to learn more about the [2020 DevSecOps Landscape](/developer-survey/#security). The results of the survey indicated that security is still a significant hurdle for most organizations that use DevOps, and show:\n\n- Only 13% of companies give developers access to the results of [application security](/topics/devsecops/) tests\n- Over 42% said testing happens too late in the lifecycle\n- 36% reported it was hard to understand, process, and fix any discovered vulnerabilities\n- 31% found prioritizing vulnerability remediation an uphill battle\n\nThese statistics illustrate why we are building security scanning directly into GitLab with our Secure features. We want to provide integrated security tools to broaden access and make it easier for everyone using GitLab to write more secure code.\n\n## Integrating security tools into everyday workflows\n\nGitLab Secure enables accurate, automated, and continuous assessment of your applications and services, allowing users to proactively identify vulnerabilities and weaknesses to minimize security risk. Secure is not an additional step in your development process nor an additional tool to introduce to your software stack. 
It is woven into your DevOps cycle, which allows you to adapt security testing and processes to your developers (and not the other way around).\n\nToday [GitLab Secure](/stages-devops-lifecycle/secure/) offers support for a variety of security scanning tools including:\n- [Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)\n- [Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/)\n- [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/)\n- [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\n- [License Scanning](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html)\n- [Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/)\n- [API Fuzzing](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/)\n- [Coverage Fuzzing](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/)\n\nAll of these tools provide unique approaches to finding security problems. No one tool is best at everything, so we wanted to provide a way to leverage many tools in an integrated way, so you're always getting the most relevant security results. Take a look at how GitLab Secure integrates all these tools into common developer workflows on GitLab:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/XnYstHObqlA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Democratizing security\n\nWith GitLab Secure, we've laid the foundation for bringing security tools directly into developers' workflows. At GitLab, we believe in a world where [everyone can contribute](/company/culture/#everyone-can-contribute). 
[Collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) and [transparency](https://handbook.gitlab.com/handbook/values/#transparency) are part of our core values. This approach changes the way we build security features. That's why as part of our [community stewardship promise](/company/stewardship/#promises) we've made all our open source based [SAST scanners available for all users](/releases/2020/08/22/gitlab-13-3-released/#sast-security-analyzers-available-for-all), we offer [open source projects and nonprofits free access to our best features](/solutions/open-source/join/), and we've created a [security scanner integration framework](https://docs.gitlab.com/ee/development/integrations/secure.html) to allow anyone to contribute security scan tools. Our entire [product strategy and vision](/direction/secure/) is also open source, so everyone can understand our vision for an integrated, accessible, and democratic approach to security. Together we can build a more open and modern security approach that helps developers everywhere write more secure code.\n\n## Integrate with GitLab Secure\n\nOut of the box, GitLab provides a variety of pre-integrated and actively managed open source security tools, such as [SAST's 16 analyzers](https://docs.gitlab.com/ee/user/application_security/sast/#supported-languages-and-frameworks) that all support automatic language detection to always run the most relevant security tool. While GitLab will continue to update and build first-party integrations we wanted to ensure that GitLab contributors and integration partners could easily extend GitLab Secure for third-party tools. Our [open integration framework](https://docs.gitlab.com/ee/development/integrations/secure.html) makes it easy for anyone to leverage all of the [features of GitLab Secure](/pricing/feature-comparison/) with any scanning tool they may want to integrate. 
You can see all the tools GitLab users have requested support for and even add your own request in our [tracking epic](https://gitlab.com/groups/gitlab-org/-/epics/297).\n\n## Community contributions\n\nWith our open integration framework we've seen members of the [GitLab community](/community/) contribute additional security scanners, help maintain the existing open source scanners we offer and expand the list of supported languages and frameworks we support. Our community contributors are helping every GitLab user have access to more accurate, sophisticated, and relevant security results. Here are some recent community contribution highlights:\n\n- [Mobile SAST support via MobSF](https://gitlab.com/gitlab-org/gitlab/-/issues/233777) (contribution by [@williams.brian-heb](https://gitlab.com/williams.brian-heb)) - [GitLab 13.5 Release MVP](/releases/2020/10/22/gitlab-13-5-released/#mvp)\n- [Adding Helm Chart support](https://gitlab.com/gitlab-org/gitlab/-/issues/36755) (contribution by [@agixid](https://gitlab.com/agixid))\n- [Performance improvements to Fuzz testing](https://gitlab.com/gitlab-org/security-products/analyzers/fuzzers/pythonfuzz/-/merge_requests/1) (contribution by [@jvoisin](https://gitlab.com/jvoisin))\n- [Updates to secret detection](https://gitlab.com/gitlab-org/gitlab/-/issues/205172) (contribution by [@tnir](https://gitlab.com/tnir))\n- [Dependency scanning buxfixes](https://gitlab.com/gitlab-org/gitlab/-/issues/205172) (contribution by [@fcbrooks](https://gitlab.com/fcbrooks))\n- [Updates to Security Scanner underlying operating systems](https://gitlab.com/gitlab-org/gitlab/-/issues/216781) (contribution by [@J0WI](https://gitlab.com/J0WI))\n- [Contributions for .NET Framework Support](https://gitlab.com/gitlab-org/security-products/analyzers/security-code-scan/-/merge_requests/14) (contribution by [@agixid](https://gitlab.com/agixid))\n- [See the full list of Secure community 
contributions](https://gitlab.com/gitlab-org/gitlab/-/issues?scope=all&utf8=%E2%9C%93&state=all&label_name[]=Community%20contribution&label_name[]=devops%3A%3Asecure)\n\nThe open source nature of GitLab allows the community to help improve, maintain, and contribute features within GitLab. This is the ultimate value of open source. Even if we don't offer something, you can always extend or modify the behavior of GitLab to accomplish your goal. When compared to closed-source Security vendors, this is a huge benefit. The impact these contributions have is massive as GitLab Secure is used by tens of thousands of customers and performs hundreds of thousands of security scans every month. If you are interested in contributing, check out our [contributor program](/community/contribute/) and [contributor documentation](https://docs.gitlab.com/ee/development/contributing/).\n\n## Integration partners\n\nCommunity contributions aren't the only way GitLab Secure is being extended. We have a variety of integration partners who provide security integrations that further expand the suite of security tools available to GitLab users. Check out the [GitLab Security integrations](/partners/#security) our partners offer. If you are a security vendor interested in integrating with GitLab, [join our partner program](/handbook/alliances/integration-instructions/) today.\n\n## Looking ahead\n\nWe've come a long way in the past few years with GitLab Secure and we're not done yet. Our [vision is bold (and open source)](/direction/secure/) and [our investment in security is large](https://internal.gitlab.com/handbook/product/investment/). 
[Security is a team effort](/direction/secure/#security-is-a-team-effort) and we hope you'll join us on our mission to help developers write more secure code.\n\n## Read more about GitLab SAST:\n\n* GitLab [Secure Direction](/direction/secure/)\n* Learn more about [integrating with GitLab Secure](https://docs.gitlab.com/ee/development/integrations/secure.html)\n* View the latest [October 2020 GitLab security trends](/blog/gitlab-latest-security-trends/)\n\nCover image by [Mitchell Luo](https://unsplash.com/@mitchel3uo) on [Unsplash](https://unsplash.com/s/photos/rowing-team)\n{: .note}\n",[109,9,875,267,1515,682],{"slug":5679,"featured":6,"template":686},"integrating-with-gitlab-secure","content:en-us:blog:integrating-with-gitlab-secure.yml","Integrating With Gitlab Secure","en-us/blog/integrating-with-gitlab-secure.yml","en-us/blog/integrating-with-gitlab-secure",{"_path":5685,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5686,"content":5692,"config":5698,"_id":5700,"_type":14,"title":5701,"_source":16,"_file":5702,"_stem":5703,"_extension":19},"/en-us/blog/introducing-gitlab-dedicated",{"title":5687,"description":5688,"ogTitle":5687,"ogDescription":5688,"noIndex":6,"ogImage":5689,"ogUrl":5690,"ogSiteName":670,"ogType":671,"canonicalUrls":5690,"schema":5691},"Introducing GitLab Dedicated, our new single-tenant SaaS offering","Learn more about this offering, why we developed it and how customers can be added to our limited availability waitlist.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682524/Blog/Hero%20Images/screenshot-2022-11-30-at-7.49.51-am.png","https://about.gitlab.com/blog/introducing-gitlab-dedicated","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing GitLab Dedicated, our new single-tenant SaaS offering\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        
\"datePublished\": \"2022-11-30\",\n      }",{"title":5687,"description":5688,"authors":5693,"heroImage":5689,"date":5695,"body":5696,"category":726,"tags":5697},[5694],"David DeSanto, Chief Product Officer, GitLab","2022-11-30","\n\nToday, we are excited to officially announce the limited availability of GitLab Dedicated, a new way to use our enterprise DevSecOps platform as a single-tenant SaaS offering. This new offering provides all of the benefits of an enterprise DevSecOps platform, with an added focus on data residency, isolation, and private networking to meet compliance needs. \n\n## Navigating compliance complexities\n\nAt GitLab, we serve a wide variety of customers — from small start-ups and community organizations to the largest global enterprises — and we know that no single deployment model will serve the needs of all of our customers.\n\nGitLab customers have told us they need a SaaS offering that provides additional deployment control and data residency to meet stringent compliance requirements. We see this need with large enterprises and companies in regulated industries that are coming under increased scrutiny, facing global internet policy fragmentation, and are dealing with the expanding complexity of data governance. The need to be compliant and secure has never been greater.\n\nEven non-regulated organizations find compliance a real threat to productivity and profits. In our [2022 Global DevSecOps Survey](/developer-survey/), we found that operations professionals are increasingly responsible for all compliance, and a majority of them spend between one-quarter and one-half of their work week managing compliance and audits. That’s a 15% increase from 2021. \n\n## When multi-tenant SaaS is not an option\n\nIn many organizations, the cloud is a substantive way to consume enterprise applications without the overhead of self-hosting. 
But for some industries, the multi-tenant nature of cloud-based SaaS services makes it an impossible choice due to regulatory restrictions. Some organizations need more choice between how they manage their data and where that data sits, and these decisions shouldn't have to come at the expense of efficiency and productivity. \n\n## Balance compliance with speed and efficiency with single-tenant SaaS\n\nI’m excited that we are offering a new deployment option by making our DevSecOps platform available as a single-tenant SaaS solution. GitLab Dedicated provides all of the benefits of an enterprise DevSecOps platform with a focus on data residency, isolation, and private networking to meet compliance needs. With GitLab Dedicated, organizations can leverage the efficiency of the cloud while still getting a completely isolated instance — without the need to deploy and manage a DevSecOps platform and cloud infrastructure themselves.\n\n### Data residency and protection\n\nGitLab Dedicated enables organizations to respond to the increasing number of countries and regions that are establishing unique data residency rules. By choosing the [cloud region that works for them and their regional requirements](https://docs.gitlab.com/ee/subscriptions/gitlab_dedicated/#aws-regions-not-supported), organizations can keep their data local to meet data isolation and residency requirements. It’s an efficient way to stay compliant and performant without the overhead of self-hosting.  \n\nTo further protect customer data, GitLab Dedicated supports a secure, private connection between the organization’s network and our service. This means that users, data, and services have secure access to the isolated instance without exposing services directly to the internet.\n\n### Managed and hosted by GitLab\n\nGitLab Dedicated is not only single-tenant, region-based, and privately connected, but it’s also managed and hosted by GitLab and deployed in the customer’s cloud region of choice. 
Organizations can quickly realize the value of a DevSecOps platform without requiring staff to build out and manage infrastructure. Organizations get all of the benefits of GitLab — shorter cycle times, lower costs, stronger security and more productive developers — with lower total cost of ownership and quicker time to value than hosting themselves.\n\n## Join the waitlist\n\nI’m truly excited to announce limited availability of GitLab Dedicated, which will bring more flexibility and greater choice to enterprise customers and organizations in highly regulated industries that have complex compliance and data residency requirements. The offering provides the efficiencies of the cloud, but with infrastructure-level isolation and data residency controls. \n\n**As we scale this new offering, we are making GitLab Dedicated available by inviting customers to join our waitlist. You can learn more and join the waitlist [on our website](/dedicated/) and get more information about the direction of the offering and [the timeline to General Availability](/direction/saas-platforms/dedicated/).**\n",[9,728,1180],{"slug":5699,"featured":6,"template":686},"introducing-gitlab-dedicated","content:en-us:blog:introducing-gitlab-dedicated.yml","Introducing Gitlab Dedicated","en-us/blog/introducing-gitlab-dedicated.yml","en-us/blog/introducing-gitlab-dedicated",{"_path":5705,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5706,"content":5711,"config":5717,"_id":5719,"_type":14,"title":5720,"_source":16,"_file":5721,"_stem":5722,"_extension":19},"/en-us/blog/introducing-gitlab-serverless",{"title":5707,"description":5708,"ogTitle":5707,"ogDescription":5708,"noIndex":6,"ogImage":4005,"ogUrl":5709,"ogSiteName":670,"ogType":671,"canonicalUrls":5709,"schema":5710},"Announcing GitLab Serverless","The true value of serverless is best realized via a single-application DevOps experience – that's why we're launching GitLab 
Serverless.","https://about.gitlab.com/blog/introducing-gitlab-serverless","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing GitLab Serverless\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Priyanka Sharma\"}],\n        \"datePublished\": \"2018-12-11\",\n      }",{"title":5707,"description":5708,"authors":5712,"heroImage":4005,"date":5714,"body":5715,"category":299,"tags":5716},[5713],"Priyanka Sharma","2018-12-11","\n\n[Serverless](/topics/serverless/) is the latest innovation in cloud computing that promises to alter the cost-benefit equation for enterprises. As our CEO, [Sid Sijbrandij](/company/team/#sytses) says, \"All roads lead to compute.\" There is a race among providers to acquire as many workloads from enterprises as possible, at the cheapest cost. The latter is where serverless comes in: serverless computing is an execution model in which the cloud provider acts as the server, dynamically managing the allocation of machine resources. Pricing is based on the actual resources consumed by an application, rather than on pre-purchased units of capacity.\n\nThis field began with the release of [AWS Lambda](https://en.wikipedia.org/wiki/AWS_Lambda) in November 2014. In the four short years since then, it has become a well-known workflow that enterprises are eager to adopt. Today, we are announcing [GitLab Serverless](/topics/serverless/) to enable our users to take advantage of the benefits of serverless.\n\n## GitLab Serverless is launching Dec. 22\n\nGitLab is the only single application for the entire [DevOps lifecycle](/topics/devops/). As part of that vision, we will release GitLab Serverless in GitLab 11.6, coming later this month, to allow enterprises to plan, build, and manage serverless workloads with the rest of their code from within the same GitLab UI. 
It leverages [Knative](https://cloud.google.com/knative/), which enables [autoscaling](https://en.wikipedia.org/wiki/Autoscaling) down to zero and back up to run serverless workloads on Kubernetes. This allows businesses to employ a multi-cloud strategy and leverage the value of serverless without being locked into a specific cloud provider.\n\nIn order to bring the best-in-class to our users, we partnered with [TriggerMesh](https://triggermesh.com/) founder [Sebastien Goasguen](https://twitter.com/sebgoa) and his team. Sebastien has been part of the serverless landscape since the beginning. He built a precursor to Knative, Kubeless. He is actively involved with the Knative community and understands the workflow from soup to nuts. Sebastien says, \"We are excited to help GitLab enable all their users to deploy functions directly on the Knative function-as-a-service clusters. We believe that these additions to GitLab will give those users the best possible experience for complete serverless computing from beginning to end.\"\n\n## \"Serverless first\"\n\nAs any attendees at [AWS re:Invent](/blog/aws-reinvent-recap/) would have noticed, the behemoth is putting all its energies behind serverless. We heard [stories from the likes of Trustpilot](https://www.computerworlduk.com/cloud-computing/how-trustpilot-takes-serverless-first-approach-engineering-with-aws-3688267/) about changing their engineering culture to \"serverless first.\" This is because serverless cloud providers save money by not having to keep idle machines provisioned and running, and are passing on the benefits to their customers. While this is amazing news, it is hard to truly embrace a workflow if it lives outside of developers' entrenched habits. 
GitLab has millions of users and is used by over 100,000 organizations, and with GitLab Serverless they can now enjoy the cost savings and elegant code design serverless brings, from the comfort of their established workflows.\n\nAs with all GitLab endeavors, making serverless multi-cloud and accessible to everyone is a big, hairy, audacious goal. Today, Knative can be installed to a Kubernetes cluster with a single click via the GitLab Kubernetes integration. It shipped in [GitLab 11.5](/releases/2018/11/22/gitlab-11-5-released/#easily-deploy-and-integrate-knative-with-gitlab).\n\n### How to activate GitLab Serverless\n\nStarting with the release of GitLab 11.6 on Dec. 22, the \"Serverless\" tab will be available for users as an alpha offering. Please do check it out and share your feedback with us.\n\n1. Go to your GitLab instance and pick your project of choice.\n2. Click on the `Operations` menu item in the sidebar.\n3. Pick `Serverless` to view the list of all the functions you have defined. You will also be able to see a brief description as well as the Knative cluster the function is deploying to.\n\n![Serverless list view](https://gitlab.com/gitlab-org/gitlab-ce/uploads/8b821d4aaa1bb75375dc54567a4313ad/CE-project__serverless-grouped.png \"Serverless list view\"){: .shadow.large.center}\n\nTo dig further, click into the function for more info.\n\n![function detail view](https://gitlab.com/gitlab-org/gitlab-ce/uploads/9e1e3893aa5369a2a165d1dd95c98dd8/CE-project__serverless--function-details.png \"function detail view\"){: .shadow.large.center}\n\nAll this goodness will be available Dec. 22. In the meantime, we would love to see you at [KubeCon Seattle](/events), where our product and engineering experts are attending to talk all things serverless with attendees. 
Hope to see you at booth S44!\n",[726,9,916,231,1477],{"slug":5718,"featured":6,"template":686},"introducing-gitlab-serverless","content:en-us:blog:introducing-gitlab-serverless.yml","Introducing Gitlab Serverless","en-us/blog/introducing-gitlab-serverless.yml","en-us/blog/introducing-gitlab-serverless",{"_path":5724,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5725,"content":5730,"config":5734,"_id":5736,"_type":14,"title":5737,"_source":16,"_file":5738,"_stem":5739,"_extension":19},"/en-us/blog/introducing-markdown-live-preview",{"title":5726,"description":5727,"ogTitle":5726,"ogDescription":5727,"noIndex":6,"ogImage":928,"ogUrl":5728,"ogSiteName":670,"ogType":671,"canonicalUrls":5728,"schema":5729},"GitLab's realtime Preview Markdown is an editor for everyone","With GitLab's new realtime Preview Markdown, technical and non-technical team members can more easily work together. Here's everything you need to know.","https://about.gitlab.com/blog/introducing-markdown-live-preview","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's realtime Preview Markdown is an editor for everyone\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Parker Ennis\"}],\n        \"datePublished\": \"2021-09-21\",\n      }",{"title":5726,"description":5727,"authors":5731,"heroImage":928,"date":4811,"body":5732,"category":726,"tags":5733},[2959],"\n\nFostering better, more meaningful collaboration is an integral part of DevOps and a key part of what GitLab, the complete DevOps Platform, unlocks for developers and their teams. 
While many developers or engineers feel more comfortable working locally on their machines and spend a majority of their time using a CLI to push code changes, with GitLab you can also use the [Web Editor](https://docs.gitlab.com/ee/user/project/repository/web_editor.html) or [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) to collaborate and edit content in a much easier, faster, and approachable way. \n\nStarting in [GitLab 14.2](https://about.gitlab.com/releases/2021/08/22/gitlab-14-2-released/), editing Markdown content in the Web Editor or Web IDE just got even better.\n\n### Introducing the real-time Preview Markdown editor\n\n[GitLab Flavored Markdown](https://docs.gitlab.com/ee/user/markdown.html) automatically renders Markdown content in an easy-to-read and easy-to-write plain text language. Although Markdown is inherently more “human-readable” and versatile when writing rich web content, Markdown files can become tricky to work with as they become more verbose and complex. \n\nEasy-to-read and easy-to-write means different roles with varying degrees of technical experience can collaborate on content more efficiently and seamlessly. However, previewing the rendered output of Markdown content to validate the accuracy of any changes has not been as intuitive, requiring an extra step to switch out of the Web IDE or Web Editor where the raw source code lives in order to view the changes from the Preview tab. Frequent context-switching back and forth between tabs to validate changes leads to wasted time and can be disruptive to the creative process while writing content.\n\nIn GitLab 14.2, now both the Web IDE and Web Editor include [an option to preview Markdown in real-time, in a single window](/releases/2021/08/22/gitlab-14-2-released/#create-split-markdown-preview). 
A side-by-side preview panel will display when editing Markdown with a click of a button that will toggle a split view panel in the editor and render the content on the page you’re working on as the changes are being made. \n\nHere’s an example of what this new functionality looks like:\n\n![Example of real-time Markdown Preview side-by-side panels](https://about.gitlab.com/images/blogimages/markdown-live-preview.png){: .shadow.small}\n\n#### How do I use it?\n\nIt’s very straightforward to start using the side-by-side preview. When you are editing any Markdown file, even a newly created one, you can right-click the editor and select **Preview Markdown** or use `Command/Control + Shift + P` to toggle a split-screen live preview of your Markdown content. From there, all you need to do is start writing or editing content and you’ll see your changes in real time!\n\n![Example of the Preview Markdown button in the static editor](https://about.gitlab.com/images/blogimages/markdown-live-preview-hotkey.png){: .shadow.small}\n\n#### Everyone can contribute\n\nAt GitLab, [everyone can contribute](/company/mission/#everyone-can-contribute) and we welcome feedback in any form. As we usher in the [new DevOps Platform era ](/blog/welcome-to-the-devops-platform-era/) and wave goodbye to the all-too-familiar \"DIY\" style of DevOps, we're excited to iterate and improve with our wider community. \n\n## What is Markdown?\n \nMarkdown is a lightweight markup language for formatting text using a plain editor text. It was created by John Gruber and Aaron Swartz in 2004. It is now one of the most popular markup languages and is used mainly by writers and programmers to help them take notes, write quickly, and develop website content without figuring out how to use the formatting toolbar in text editors. 
A big part of its appeal is that you don't have to have any knowledge of HTML to use Markdown to write and create web pages.\n \nMarkdown is platform-independent and can be used to create websites, documents, notes, books, presentations, emails, and more. \n \nThere is some school of thought that Markdown is easier to write than HTML, and it's easier for most people to read Markdown source than HTML source. In fact, experts say you can learn Markdown in as little as 10 minutes.\n \n## What is Markdown used for?\n \nMarkdown can be used to format code in GitLab. Creating a markdown file in GitLab requires creating a new file with the .md extension. Once in the new file, the code can be written in Markdown syntax. When the code is finished, you can commit the file to your Git repository.\nWhile not as feature-laden as Microsoft Word, Markdown lets you create basic documents and use a Markdown document authoring app to export formatted documents to PDFs or HTML files.\n \nUsing Markdown is different than using a [WYSIWYG](https://en.wikipedia.org/wiki/WYSIWYG) editor. For example, in an application like Word, changes are visible immediately. Markdown is different. When a Markdown-formatted file is created, you add Markdown syntax to the text to indicate which words and phrases should look different.\n \n[For example,](https://www.markdownguide.org/getting-started/) to distinguish a heading, add a number sign before it (e.g., # Heading One). Or add two asterisks before and after a phrase to put it in bold (e.g., **this text is bold**). 
\nBolden and italicize text in Markdown without needing the WYSIWYG interface.\n\n",[771,9,3993],{"slug":5735,"featured":6,"template":686},"introducing-markdown-live-preview","content:en-us:blog:introducing-markdown-live-preview.yml","Introducing Markdown Live Preview","en-us/blog/introducing-markdown-live-preview.yml","en-us/blog/introducing-markdown-live-preview",{"_path":5741,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5742,"content":5748,"config":5753,"_id":5755,"_type":14,"title":5756,"_source":16,"_file":5757,"_stem":5758,"_extension":19},"/en-us/blog/introducing-modelops-to-solve-data-science-challenges",{"title":5743,"description":5744,"ogTitle":5743,"ogDescription":5744,"noIndex":6,"ogImage":5745,"ogUrl":5746,"ogSiteName":670,"ogType":671,"canonicalUrls":5746,"schema":5747},"Adopt ModelOps within DevOps to solve data science challenges","The ModelOps stage of DevOps applies AI and ML to address complex data science challenges.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668437/Blog/Hero%20Images/faster-cycle-times.jpg","https://about.gitlab.com/blog/introducing-modelops-to-solve-data-science-challenges","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Adopt ModelOps within DevOps to solve data science challenges\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-01-21\",\n      }",{"title":5743,"description":5744,"authors":5749,"heroImage":5745,"date":5750,"body":5751,"category":769,"tags":5752},[2862],"2022-01-21","\nIn a [recent blog post](/blog/the-road-to-smarter-code-reviewer-recommendations/) discussing the progress of integrating novel machine learning (ML) algorithms into GitLab we introduced our new [ModelOps stage](/direction/modelops/). This stage is focused on enabling and empowering data science workloads on GitLab. 
GitLab ModelOps aims to bring data science into GitLab within existing features to make them smarter and more intelligent and empowering GitLab customers to build and integrate data science workloads within GitLab.\n\nAn interesting question we hear a lot is how will this be useful for DevOps professionals? So we wanted to dive into who exactly we’re building ModelOps features for and why. To begin, here is an overview of how we’ve chosen to structure our new ModelOps stage. \n\n## ModelOps: Enabling and empowering data science workloads\n\n![Chart of ModelOps stages](https://about.gitlab.com/images/blogimages/Screen_Shot_2022-01-19_at_1.11.36_PM.png){: .shadow}\n\nModelOps is about taking all the best practices we’ve learned building a DevOps platform and applying them to the unique challenges of AI and ML workloads. Our ModelOps stage is divided into three primary groups: DataOps, MLOps, and AI Assisted. Each group has specific jobs to be done and challenges. Part of the reason we chose this organization model is due to the different user personas we’re trying to solve problems for in each of these areas. Now let’s dive into the people in each group, as well as the challenges each group aims to solve. \n\n## DataOps: Get the data, clean it, and process it\n\nDataOps is focused on everything required to process data workloads, including fetching data, cleaning it, and processing it. You may have heard this called ELT, or Extract, Load, Transformation, of data. But DataOps is more than just the ELT, there are lots of other problems that come with data sources. For example, data located in many disparate systems in many formats and lacking common data definitions. Most data sources require a lot of processing to access, move, clean, and interpret data. We have specialists whose entire job is [all of the work to get data into usable states](https://online.hbs.edu/blog/post/data-life-cycle) so organizations can do something of business value with it. 
\n\nDepending on the organization, these data professionals may have different titles such as data engineer, data architect, or data analyst.  These data wranglers have many assorted jobs: aggregating disparate data sources, cleaning and shaping data into usable formats, making data available to the business, and even analyzing data and answering business questions.\n\nThe data experts leverage many tools such as ELT platforms, big data warehouses, data pipelines, and database technologies like SQL and elastic search. Data management tooling can be an extremely complex series of connections piping data in and out of various platforms. These challenges are the heart of the problems we’re aiming to solve.\n\n## MLOps: Do something useful with the data\n\nNext is MLOps, which is what most people associate with data science. MLOps aims to enable customer data science use cases, including accessing and interacting with data, AI/ML toolchain integrations, and compute environment integrations. Basically, everything that is required to build, test, train, and deploy AI/ML models into production systems. MLOps leverages math to solve problems using computing power to find patterns in the data that we just discussed with DataOps. \n\nData science teams feature professionals with titles such as data scientists, ML engineers, or ML specialists. These experts usually have a mix of higher-level math and statistics skills, software engineering, and basic DevOps skills. They can cobble together environments to build, train, test, and explore data science models to solve specific business problems.\n\nThe work data scientists do is more than just building ML models. They have to understand the business data and problems they are trying to leverage data science to solve. It’s usually very experimental and requires a lot of iteration to find a solution that solves a particular business problem in a useful way. 
It’s common for data scientists to spend a lot of time exploring and understanding datasets and the business problems organizations are hoping data science can solve. They then build and train AI/ML models, evaluate model output, and then iterate their models.\n\nAmong the common tools these data scientists use are Python notebooks, which allow them to leverage scripting to explore and manipulate data and try different modeling techniques. They also may use many open source ML and data science frameworks, as well as special data science platforms that help manage, version, interpret, and monitor models. Most of this work almost never happens in production environments. It happens on local machines or in cloud computing platforms where data scientists can leverage highly specialized compute, optimized for running data science models. That leaves an interesting challenge of how do you deploy their work to production systems.  Our last use case, DevOps, provides the solution. \n\n## AI Assisted: Leverage data to solve business problems \n\nWhile our AI Assisted group isn't specifically focused on any one user persona, we are planning to enrich existing GitLab features with ML. Our goal is to take features that require manual work to leverage and apply ML to automate these tasks. Tasks like assigning and labeling issues, choosing code reviewers, and even triaging and fixing security vulnerabilities. You can read more about our AI Assisted plans on our [direction page](/direction/ai-powered/) or check in on the status of our first Applied ML feature, [suggested reviewers](/blog/the-road-to-smarter-code-reviewer-recommendations/). Now that we've touched on improving GitLab for everyone, let's go back to GitLab's main persona, DevOps engineers.\n\n## DevOps: Build, test, and deploy software \n\nDevOps is probably the most understood use case that we’re trying to solve with our ModelOps stage. However, we’re focused on the intersection of DevOps and data science workloads. 
Specifically what happens when you need to deploy a data science model to a production system. GitLab’s DevOps platform is already an established and mature platform for building, testing, and deploying traditional software applications. But the software stacks of modern organizations are evolving and becoming more sophisticated, including leveraging ML. We’ve described some of the challenges and new personas that are involved with the development of data science workloads, but what happens when it’s time to go to production?\n\nToday, data science teams and DevOps engineers work in separate silos with very different skills sets and technology challenges. So when a data science team has a new ML model they want to push into a production software environment and integrate into a running application, in walks a whole new set of challenges. \n\nJust about every software company now has DevOps teams focused on repeatability, stability, and velocity of software development lifecycles. Everything relating to the design, build, testing, deployment, security, and monitoring of software from idea to deploy into a production system. These teams are usually comprised of software engineers and DevOps engineers. The people who write, build, and test code with repeatable CI/CD, allowing software teams to seamlessly develop software applications. \n\n## Helping them all work together\n\nOur goal with ModelOps is to help all of these people work together to build and deploy data-rich modern applications leveraging novel ML workloads. We want to bring data science into GitLab within existing features to make them smarter and more intelligent and to empower GitLab customers to build and integrate data science workloads in their own applications built and deployed with GitLab. Each of these groups has unique challenges and use cases that are interconnected. That’s part of what makes data science difficult. 
It has a lot of moving parts and crosses every aspect of modern software development lifecycles with very unique challenges. \n\nIf all of this is interesting to you, you may also enjoy watching our recent Contribute session, where we discuss more about what we plan to accomplish with our ModelOps stage, which you can watch on YouTube.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n  \u003Ciframe src=\"https://www.youtube.com/embed/C08QVI99JLo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n_This blog post contains information related to upcoming products, features and functionality._\n\n_It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes._\n\n_As with all projects, the items mentioned in this blog post and linked pages are subject to change and delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n",[9,1339,683],{"slug":5754,"featured":6,"template":686},"introducing-modelops-to-solve-data-science-challenges","content:en-us:blog:introducing-modelops-to-solve-data-science-challenges.yml","Introducing Modelops To Solve Data Science Challenges","en-us/blog/introducing-modelops-to-solve-data-science-challenges.yml","en-us/blog/introducing-modelops-to-solve-data-science-challenges",{"_path":5760,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5761,"content":5767,"config":5772,"_id":5774,"_type":14,"title":5775,"_source":16,"_file":5776,"_stem":5777,"_extension":19},"/en-us/blog/introducing-the-gitlab-ci-cd-catalog-beta",{"title":5762,"description":5763,"ogTitle":5762,"ogDescription":5763,"noIndex":6,"ogImage":5764,"ogUrl":5765,"ogSiteName":670,"ogType":671,"canonicalUrls":5765,"schema":5766},"Introducing the GitLab CI/CD Catalog Beta","Discover, reuse, and contribute CI/CD 
components effortlessly, enhancing collaboration and efficiency when creating pipeline configurations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099399/Blog/Hero%20Images/Blog/Hero%20Images/security-pipelines_4UHVIJlePT8rEzjvYkGYvi_1750099398604.jpg","https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing the GitLab CI/CD Catalog Beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-12-21\",\n      }",{"title":5762,"description":5763,"authors":5768,"heroImage":5764,"date":5769,"body":5770,"category":791,"tags":5771},[2120],"2023-12-21","DevSecOps is all about speed – achieving rapid progress in software development. To succeed in DevSecOps, organizations require a well-functioning CI/CD pipeline that teams can utilize to automate their development workflows.\n\nHowever, crafting pipeline configurations with YAML can be intricate and challenging because YAML isn't a programming language, Developers may find themselves reinventing the wheel each time they try to create new configurations because they don't have visibility into existing configurations and work that others may have already done, resulting in inefficiency.\n\n[GitLab 16.7](https://about.gitlab.com/releases/2023/12/21/gitlab-16-7-released/) introduces the [CI/CD Catalog](https://docs.gitlab.com/ee/ci/components/#cicd-catalog) (Beta), with the goal of enhancing developer efficiency by addressing three main questions developers encounter when creating pipeline configurations:\n\n* Discoverability: Has someone already created a configuration for my task, and where can I find it?\n* Reusability: Once I find a suitable pipeline, how do I use it effectively?\n* Ease of contribution: I've created a useful configuration; how can I easily share it with the GitLab 
community?\n\n## What is the GitLab CI/CD Catalog?\n\nThe CI/CD Catalog serves as a centralized hub for developers and organizations to share pre-existing [CI/CD components](https://docs.gitlab.com/ee/ci/components/) and to discover reusable configurations that others may have already developed. Every component published by users will be part of a public catalog accessible to all users, regardless of their organization or project. \n\nThis approach promotes cross-organization collaboration, allowing the entire GitLab community to benefit from the wealth of CI components available. It's a powerful step forward in sharing knowledge among GitLab users, enabling developers to harness the collective expertise of the platform.\n\n## Easy component creation and publishing\n\nIn addition to reusing components, developers can contribute to the GitLab CI/CD community by creating their own components and publishing them in the catalog. This ensures that others can benefit from their expertise and encourages collaboration across the platform.\n\n## How to discover and use components\n\n**1. Opening the CI/CD Component Catalog**\n\nClick on “Search or go to...”\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099406962.png)\n\nOpen the catalog by navigating to “Explore > CI/CD Catalog” or visit this [catalog page](https://gitlab.com/explore/catalog).\n\nUpon accessing the catalog, you'll find a list of CI/CD components projects contributed by your team, organization, or the wider GitLab community.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099406963.png)\n\n**2. 
Browsing components**\n\nNavigate through the list of components in the CI/CD Catalog or use the Search bar to find components related to a specific topic.\n\nEach component project contains one or multiple components. Opening a component project will display its documentation, providing details on all available components. This includes insights into how to use each component and understanding the expected input parameters.\n\n**3. Include the selected components in your .gitlab-ci.yml**\n\nNow that you've explored the catalog and selected the desired CI/CD components, integrate them into your project's CI/CD pipeline.\n\nFollow these steps to update your .gitlab-ci.yml file:\n\n1. Open the .gitlab-ci.yml file in your project for editing.\n2. Use the include keyword to add the selected components to your CI configuration. \n3. Ensure that the paths to the component YAML files are correct and specify the appropriate version for each component.\n4. In case the components have input parameters, review the component’s documentation to understand which inputs are required, and add them to your CI configuration.\n5. Save and commit your changes to the .gitlab-ci.yml file.\n\nHere is an example of YAML code that demonstrates how to include a few components and use them with input parameters.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_1.15.48_PM_aHR0cHM6_1750099406965.png)\n\n## How to create and publish components\n\nHave you crafted a valuable configuration that you'd like to share and contribute to your team or the GitLab community? Here are the six steps to make it happen:\n\n**Step 1: Create a new project and set it as a component project**\n\n1. On the left sidebar, select **Search or go to** and find your project.\n2. On the left sidebar, select Settings > General.\n3. Expand Visibility, project features, permissions.\n4. 
Scroll down to CI/CD Catalog resource and select the toggle to set the project as a CI/CD Catalog resource.\n5. Ensure that your project description is filled out; this information will be showcased in the catalog, providing users with insights into the purpose and functionality of your components.\n6. Create a .gitlab-ci.yml file in the root of the repository. You will need this file to test and release the components as described in steps 4 and 5 below. Note: This step only needs to be done once for any project that contains components.\n\n**Step 2: Create the components**\n\n1. Create a /templates folder in the root directory of the project.\n2. In this templates directory, create one YAML template file (ending in .yml) for each component.   \n3. The template can optionally include a description of input arguments using the `spec` keyword if the component requires input parameters, and the definition of jobs, that may include references to values using the interpolation format $[[ inputs.input-name ]]. Ensure you use three dash lines between the spec header, and job definitions.\n\nHere is an example of a `deploy.yml` template that gets input parameters:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_11.34.20_AM_aHR0cHM6_1750099406966.png)\n\nIn this template, we've defined two input parameters, `stage` and `environment`, both with default values. In the content section, a job is defined that interpolates these input arguments.\n\n**Step 3: Create components documentation** \n\nCreate a README.md file in the root of the project, including information about the components. Explain the component's functionality, detail input parameters, and provide illustrative examples. 
This ensures clarity for component consumers on how to use them.\n\nThis is an example of component documentation:\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099406967.png)\n\nAdditional information can be found in our [CI/CD components](https://docs.gitlab.com/ee/ci/components/index.html#components-repository) documentation. \n\n**Step 4: Add tests to the components (recommended)**\n\nDeveloping a component follows a standard software development cycle with stages like build, test, and deploy. It's highly recommended to test your components before publishing them. Check out this example test, which queries the GitLab REST API to check whether a component job has been added to the pipeline. Feel free to use it, and consider adding more tests to ensure your components work as expected.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.32.53_PM_aHR0cHM6_1750099406968.png)\n\nInclude all your test jobs in the **.gitlab-ci.yml** file in your Catalog project.\n\n**Step 5: Prepare your CI/CD configuration for publishing**\n\n1. Create a release job in the **.gitlab-ci.yml** file in the component project using the `Release` keyword.  See the job example:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.34.27_PM_aHR0cHM6_1750099406969.png)\n\n__Note:__ Do not \"create release\" from GitLab UI since this soon won't be supported for a Component Catalog.\n\n2. 
We recommend adding this rule in the Release job; this will automatically trigger the Release job only when creating a git tag starts with digits in the project, following semantic release conventions (1.0.0 for example).\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_1.21.30_PM_aHR0cHM6_1750099406970.png)\n\n3. So this is how we recommend your job to look: \n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.37.09_PM_aHR0cHM6_1750099406970.png)\n\n4. To manually release components, add manual rule as below, so when the pipeline is triggered, someone will need to manually run the release job. \n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.38.18_PM_aHR0cHM6_1750099406971.png)\n\nHere is the release job with the `when:manual` rule:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.41.00_PM_aHR0cHM6_1750099406972.png)\n\n**Step 6: Publish your components**\n\nOnce you are satisfied with your components, and all tests have passed successfully, it's time to publish a new version by creating a git tag, so they will be available in the CI/CD Catalog.\n\n1. Create a Git tag using the semantic versioning format \"MAJOR.MINOR.PATCH\". \n\n2. You can create tags through the UI by navigating to Code -> Tags -> New Tag, or via the CLI using `git tag`. \n\n3. Creating the tag will trigger a pipeline that runs the Release job if all tests pass successfully. 
The component project will then be assigned the version you defined in the tag, and it will appear in the catalog.\n\n### Example projects\n\n* [GitLab official components](https://gitlab.com/components)\n\n### Documentation \n\nFor more details on using components from the CI/CD Catalog and maximizing their potential within your projects, refer to the official [CI/CD Catalog documentation](https://docs.gitlab.com/ee/ci/components/#cicd-catalog). This documentation provides in-depth insights into the functionality.\n\n> [Take a tour](https://gitlab.navattic.com/cicd-catalog) of the GitLab CI/CD Catalog.\n\n_A special thank you to [Dov Hershkovitch](https://about.gitlab.com/company/team/#dhershkovitch) and [Fabio Pitino](https://gitlab.com/fabiopitino) for their invaluable content reviews and contributions to this blog post._",[109,726,9,978],{"slug":5773,"featured":6,"template":686},"introducing-the-gitlab-ci-cd-catalog-beta","content:en-us:blog:introducing-the-gitlab-ci-cd-catalog-beta.yml","Introducing The Gitlab Ci Cd Catalog Beta","en-us/blog/introducing-the-gitlab-ci-cd-catalog-beta.yml","en-us/blog/introducing-the-gitlab-ci-cd-catalog-beta",{"_path":5779,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5780,"content":5786,"config":5791,"_id":5793,"_type":14,"title":5794,"_source":16,"_file":5795,"_stem":5796,"_extension":19},"/en-us/blog/ios-cicd-with-gitlab",{"title":5781,"description":5782,"ogTitle":5781,"ogDescription":5782,"noIndex":6,"ogImage":5783,"ogUrl":5784,"ogSiteName":670,"ogType":671,"canonicalUrls":5784,"schema":5785},"Tutorial: iOS CI/CD with GitLab","Learn how to create an automated CI/CD pipeline using GitLab and fastlane.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669340/Blog/Hero%20Images/john-cameron-DgRb7aAGK4k-unsplash.jpg","https://about.gitlab.com/blog/ios-cicd-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"Tutorial: iOS CI/CD with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2023-06-07\",\n      }",{"title":5781,"description":5782,"authors":5787,"heroImage":5783,"date":5788,"body":5789,"category":791,"tags":5790},[1260],"2023-06-07","\n\nCreating an automated [CI/CD](https://docs.gitlab.com/ee/ci/) pipeline for an Apple iOS application can be challenging. Configuring build environments and managing code signing can be very time-consuming and error-prone, and when you get that all working, you still need a way to send your app to Apple.\n\nGitLab makes this much easier with [GitLab Mobile DevOps](https://docs.gitlab.com/ee/ci/mobile_devops.html).\n\nGitLab Mobile DevOps is a collection of features built right into GitLab to solve the biggest challenges mobile teams face in establishing a DevOps practice.\n\nIn this blog post, I’ll demonstrate how to set up an automated CI/CD pipeline using GitLab and [fastlane](https://fastlane.tools/).\n\n## Prerequisites\nTo get started, there are a few prerequisites you’ll need:\n\n* An Apple Developer account - [https://developer.apple.com/](https://developer.apple.com/)\n* Ruby and XCode command line tools installed on your local machine [https://docs.fastlane.tools/getting-started/ios/setup](https://docs.fastlane.tools/getting-started/ios/setup/) \n\n> Try out our [Android CI/CD with GitLab tutorial](/blog/android-cicd-with-gitlab/).\n\n## Reference project\nFor this walkthrough, we’ll use the iOS demo project for reference: [https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo)\n\n## Install fastlane\nIf you haven’t done so yet, the first step will be to install fastlane. Do this by creating a file in the root of your project called Gemfile. 
Give it the following contents:\n\n```\nsource \"https://rubygems.org\"\n\ngem \"fastlane\"\n```\n\nThen, from the terminal in your project, run:\n\n```\nbundle install\n```\n\nThis command will install fastlane, and all of its related dependencies.\n\n## Initialize fastlane\nNow that fastlane is installed, we can set it up for our project. Run the following command from the terminal in your project and choose Option No. 2 since we will be targeting Test Flight in this tutorial:\n\n```\nbundle exec fastlane init\n```\n\nRunning this command will create a new folder in your project called `fastlane`. This folder will contain two files `Appfile` and `Fastfile`. \n\n![Initialize Fastlane](https://about.gitlab.com/images/blogimages/2023-04-15-ios-cicd-with-gitlab/fastlane-init.png)\n\nThe Appfile contains the configuration information for the app, and the Fastfile has some sample code that we will replace later. See the fastlane docs for more information about the configuration details in the Appfile [https://docs.fastlane.tools/advanced/Appfile/](https://docs.fastlane.tools/advanced/Appfile/)\n\n## Initialize fastlane match\nThe next step will be to set up fastlane Match, which is the part of fastlane that handles code signing. For more information on fastlane match, see the docs [https://docs.fastlane.tools/actions/match/](https://docs.fastlane.tools/actions/match/ )\n\nWe’ll start by running the following command from the terminal in your project:\n\n```\nbundle exec fastlane match init\n```\n\nThis command will prompt you to choose which storage backend you want to use (select gitlab_secure_files) and to input your project path (for example: gitlab-org/gitlab). 
It will then generate a fastlane Matchfile configured to use your project as the storage backend for fastlane Match.\n\n![Initialize fastlane Match](https://about.gitlab.com/images/blogimages/2023-04-15-ios-cicd-with-gitlab/match-init.png)\n\n## Generate a project access token\nNext, you'll need a GitLab Access Token to use fastlane Match from your local machine. To create a project access token, visit the Access Tokens section under Settings in your GitLab project. Create a new token with maintainer access to the “api” scope.\n\nThen run the following command from the terminal in your project replacing “YOUR_NEW_TOKEN” with the access token you just generated:\n\n```\nexport PRIVATE_TOKEN=YOUR_NEW_TOKEN\n```\n\nThis will configure fastlane to use this access token when making fastlane Match requests to your project.\n\n## Generate signing certificates\nNow that fastlane Match is configured, we can use it to generate the signing certificates and provisioning profiles for our app and upload them to GitLab.\n\nNOTE: If you already have these files for your app, see the instructions in this blog post on how to use fastlane to import your existing code signing files [/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/](/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/).\n\nRun the following command from the terminal in your project to generate development code signing files and upload them to GitLab.\n\n```\nbundle exec fastlane match development\n```\n\nWhen this command completes, go to the CI/CD settings page in your project and scroll down to the Secure Files section to see the files that were just generated and added to your project.\n\nWhile we’re here, we can go ahead and do that same thing for the appstore code signing files. 
Run the following command to generate the appstore code signing files and upload them to GitLab.\n\n```\nbundle exec fastlane match appstore\n```\n\n## Update Xcode configuration\nWith the code signing files ready to go, we have one small change to make in Xcode. In your project in Xcode, go to the Signing & Capabilities section and disable automatically managing code signing. Then, select the appropriate provisioning profile and signing certificate from the list based on your build target. The certificates we just generated will show up in that list.\n\n![Configure Xcode Provisioning Profiles](https://about.gitlab.com/images/blogimages/2023-04-15-ios-cicd-with-gitlab/xcode.png)\n\nWith all of our code signing configuration in place, we can now move on to setting up the integration with the Apple App Store.\n\n## Apple App Store integration\nThe final bit of configuration is the Apple App Store integration. To do this, we’ll need to create an API key in App Store Connect. See the instructions here to create and download the key file to your location machine. This key should have the role of App Manager. [https://developer.apple.com/documentation/appstoreconnectapi/creating_api_keys_for_app_store_connect_api](https://developer.apple.com/documentation/appstoreconnectapi/creating_api_keys_for_app_store_connect_api)\n\nOnce the key is generated, go to Settings, Integrations in your project, and click on the integration for Apple App Store Connect. You’ll be asked to supply the issuer ID and key ID from App Store Connect, along with the key file you just downloaded. With all of that configuration in place, click the Test Settings button to ensure everything works. If it gives you an error, double check your settings and try again. Once it’s working, click Save Changes to save and activate the integration. 
\n\nWith the integration activated, the following CI variables are added to all pipelines on protected branches and tags:\n\n* `APP_STORE_CONNECT_API_KEY_ISSUER_ID`\n* `APP_STORE_CONNECT_API_KEY_KEY_ID`\n* `APP_STORE_CONNECT_API_KEY_KEY`\n\nThese CI variables can be used by fastlane or any custom tooling to interact with the Apple App Store to upload builds, or perform other API enabled tasks.\n\n## Fastfile\nWith all of our configuration in place, we can now drop in a sample Fastfile to show how to perform the build, sign, and release actions.\n\nFrom the [sample project](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo), copy the contents of the fastlane/Fastfile and paste it into the Fastfile in your project, replacing the existing content. \n\n[https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/fastlane/Fastfile](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/fastlane/Fastfile)\n\nThis sample Fastfile contains two lanes, which are actions fastlane can execute. The lanes in this file are `build` and `beta`. \n\n### Build\nThe build lane will perform just a couple of actions to `setup_ci`, `match`, and `build_app`. This will use the development certificate we generated with fastlane Match earlier to build and sign the app for development. \n\n### Beta\nThe beta lane takes a few more steps to `setup_ci`, `match`, `app_store_connect_api_key`, `increment_build_number`, `build_app`, and `upload_to_testflight`. This lane will use the appstore certificates we generated with faslane Match earlier to build and sign the app for an appstore release. This lane also uses the App Store Connect integration to connect to the app store to determine the next build number to use, and to upload the final build to Test Flight. \n\n### .gitlab-ci.yml\nWith the fastlane configuration ready to go, the last step is to hook it up to GitLab CI. 
\n\nFrom the [sample project](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo), copy the contents of the `.gitlab-ci.yml` file and paste it into the project. \n\n[https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/.gitlab-ci.yml](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/.gitlab-ci.yml )\n\nThis is a simplified CI configuration that created two CI jobs to run each of the lanes in fastlane on the GitLab macOS shared runners. The build job will run for all CI pipelines and the beta job will only be run on CI pipelines on the master branch. The beta job is also manually triggered, so you can control when the beta release is pushed to Test Flight. \n\nWith all of this in place, commit all of these changes and push them up to your project. The CI pipeline will kick off, and you can see these jobs in action. \n\nCover image by \u003Ca href=\"https://unsplash.com/@john_cameron?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">John Cameron\u003C/a> on \u003Ca href=\"https://unsplash.com/photos/DgRb7aAGK4k?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n{: .note}\n",[9,109,978],{"slug":5792,"featured":6,"template":686},"ios-cicd-with-gitlab","content:en-us:blog:ios-cicd-with-gitlab.yml","Ios Cicd With Gitlab","en-us/blog/ios-cicd-with-gitlab.yml","en-us/blog/ios-cicd-with-gitlab",{"_path":5798,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5799,"content":5805,"config":5810,"_id":5812,"_type":14,"title":5813,"_source":16,"_file":5814,"_stem":5815,"_extension":19},"/en-us/blog/it-automation-developer-productivity",{"title":5800,"description":5801,"ogTitle":5800,"ogDescription":5801,"noIndex":6,"ogImage":5802,"ogUrl":5803,"ogSiteName":670,"ogType":671,"canonicalUrls":5803,"schema":5804},"How IT automation impacts developer productivity","See how IT automation 
promotes a healthier IT culture and unlocks next-level DevOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670529/Blog/Hero%20Images/automate-retrospectives.jpg","https://about.gitlab.com/blog/it-automation-developer-productivity","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How IT automation impacts developer productivity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-05-30\",\n      }",{"title":5800,"description":5801,"authors":5806,"heroImage":5802,"date":5807,"body":5808,"category":791,"tags":5809},[788],"2019-05-30","\n\nQuestion: If developers spend the bulk of their days on painful, manual tasks, would you say that’s the best use of their time? In a development environment that is always [trying to do more with less](/topics/devops/reduce-devops-costs/), manual processes are productivity killers.\n\nAutomation makes it possible for engineering talent to use their skills on projects that add real business value and contribute to long-term growth. In the world of QA, test automation is creating a modern strategy [focused on excellent user experiences](/blog/trends-in-test-automation/). IT automation makes it possible to deploy applications faster and increase developer productivity, making the DevOps lifecycle more seamless.\n\n\n## The right people doing the right tasks\n\nIT automation ensures businesses have the right people performing the right tasks, and that has some unexpected benefits. Directing developer talent toward strategic initiatives actually creates a healthier DevOps culture. When developers can work on challenges that are more aligned with their role, they’re likely to be happier and more motivated, and that in turn helps with retention. 
One of the top reasons developers leave is because [they feel unchallenged in their work](https://differential.com/insights/why-software-developers-leave-and-best-ways-to-retain-them/). IT automation lets developers use their skills for projects where they’re most suited.\n\nThere’s a cost benefit to IT automation, as well. If you have senior engineers working on basic maintenance, [you’re spending too much on maintenance](https://enterprisersproject.com/article/2017/12/5-factors-fueling-automation-it-now), period. Even if you limit these tasks to junior levels, you’re probably still spending too much. While there's a lot more to automation than reducing costs, it's an undeniable benefit.\n\nIf it can be automated, it probably should be.\n\n\n## Automating for growth\n\nAs organizations innovate and increase their deployments, they’ll need IT architecture that supports that growth. Could engineers manually develop and configure 50, 100, or even 200 servers? Sure. But what about 1,000 or 2,000? That’s where IT automation becomes a necessity for scalable workloads. Putting special focus on the handoffs between processes (where waste most often occurs) is how leaders can identify the best automation opportunities. [Value stream mapping](https://www.linkedin.com/pulse/automate-question-ricardo-coelho-de-sousa/) is a method used to uncover what should be fully automated, and what may only need partial automation in the interim.\n\nWithout the right IT automation, growth will undoubtedly suffer as teams need more and more staff to keep up with demand. Automation and collaboration are an essential part of operational efficiency, accelerating delivery, and innovating products. 
CI/CD is the link that connects developers and operations, and that automation helps developers teams build better software and vastly improves the handoff process.\n\n\n## Minimizing risk\n\nReducing manual work [minimizes the risk of human error](https://techbeacon.com/devops/how-take-architectural-approach-it-automation), which gives IT the ability to focus on mission-critical tasks rather than cleaning up mistakes. IT automation also adds a system of checks and balances, so if a mistake happens, errors can be rolled back painlessly.\n\nAutomation tools and containers can make security more efficient. [Kubernetes](/solutions/kubernetes/) not only manages container deployments, it can also orchestrate security tasks. “You really want automation, orchestration to help manage which containers should be deployed to which hosts … knowing which containers need to access each other; managing shared resources, and monitoring container health,” says Red Hat security strategist Kirsten Newcomer. “[As you scale up your use of containers and microservices, automation soon becomes a core need](https://enterprisersproject.com/it-automation).”\n\nRemoving the human error component gives developers the peace of mind to work at the pace they want.\n\n\n## Keeping up with innovation\n\nSpeaking of speed – in the (not so distant) past, developers had to write docs and notify teammates about changes in the cloud environment, share content about provisioning and de-provisioning, synchronize problems, and exchange emails. All of that took time. The fewer barriers developers have between code and deployment, the better.\n\n[DevOps tools have created a buffer that allows developers and operations teams to work independently](https://www.infoworld.com/article/3230285/how-devops-changes-dev-and-ops.html). Automation is just a continuation of that DevOps journey – developers can work in real time, and operations teams still procure hardware and manage servers, but at a larger scale. 
Automation works best when you have specific objectives in mind.\n\nThe team at Monkton had a goal: The moment code is checked in and reviewed, they wanted the testing, deployment, and the security vulnerability scanning lifecycles automated. They wanted their people to do what they do best but had a hodgepodge of tools that couldn’t work together. They brought in better tools to automate those processes, tied them into GitLab, and now they have the repeatability they need at the speed they want.\n\n[Read their story](/blog/monkton-moves-to-gitlab-customer-story/).\n{: .alert .alert-gitlab-purple .text-center}\n\nIT automation is what makes next-level DevOps possible and gives developers the opportunity to use their skills in ways that add real, long-term value. When organizations automate mundane, manual tasks, they save costs and create a healthy IT culture where developers are challenged and processes are efficient – a real win-win.\n\nAre you ready to explore the benefits of IT automation and increase developer productivity? 
[Just commit](/blog/application-modernization-best-practices/).\n\nPhoto by [Daniele Levis Pelusi](https://unsplash.com/photos/Pp9qkEV_xPk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/automation?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,855,109],{"slug":5811,"featured":6,"template":686},"it-automation-developer-productivity","content:en-us:blog:it-automation-developer-productivity.yml","It Automation Developer Productivity","en-us/blog/it-automation-developer-productivity.yml","en-us/blog/it-automation-developer-productivity",{"_path":5817,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5818,"content":5824,"config":5829,"_id":5831,"_type":14,"title":5832,"_source":16,"_file":5833,"_stem":5834,"_extension":19},"/en-us/blog/iteration-on-error-tracking",{"title":5819,"description":5820,"ogTitle":5819,"ogDescription":5820,"noIndex":6,"ogImage":5821,"ogUrl":5822,"ogSiteName":670,"ogType":671,"canonicalUrls":5822,"schema":5823},"Why we scoped down to build up error tracking ","We dig into how shipping small iterations is accelerating delivery on our error tracking product.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665440/Blog/Hero%20Images/automate-ce-ee-merges.jpg","https://about.gitlab.com/blog/iteration-on-error-tracking","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we scoped down to build up error tracking \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-01-23\",\n      }",{"title":5819,"description":5820,"authors":5825,"heroImage":5821,"date":5826,"body":5827,"category":791,"tags":5828},[2002],"2020-01-23","When our vision for [error tracking](https://about.gitlab.com/direction/monitor/platform-insights/error_tracking/) is fully realized, the developers who use GitLab 
will be able to find and fix errors before their customers ever report them, all while staying in our tool. But waiting until our error tracking feature is pristine would just slow us down.\n\nInstead, the engineers and product managers on the [Monitor:Health](https://handbook.gitlab.com/handbook/engineering/development/ops/monitor/respond/) team work **iteratively** by shipping smaller changes as we move closer to achieving our vision for the error tracking feature.\n\n## What does it mean to work iteratively?\n\n\"[Iterating] means scoping down a task to deliver it sooner. So, it means making something smaller so you can get it done quicker,\" says [Sid Sijbrandij](/company/team/#sytses), CEO and co-founder of GitLab.\n\nWe made [iteration](https://handbook.gitlab.com/handbook/values/#iteration) one of our core company values because of the fundamental belief that even a small change is better than no change at all. And while iteration in engineering is already recognized as being effective, our organization aims to make iteration a component to every team’s workflow.\n\nIn the video below, Sid and [Christopher \"Leif\" Lefelhocz](https://about.gitlab.com/company/team/#christopher-l), senior director of development, share how the product and engineering teams worked together to speed up development on error tracking by breaking the engineering process down into small steps and iterating as they go.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/tPTweQlBS54\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWe followed up with the Monitor:Health team to talk about how product and engineering worked together to develop an iterative strategy for making improvements to our error tracking product, both in terms of how our product team built the plan for error tracking and how engineering shipped the [minimum viable 
change](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) (MVC) to production.\n\n## How we created a product strategy for error tracking\n\nError tracking is a process whereby application errors are identified and fixed as quickly as possible. The way error tracking functions at GitLab today is [through integration with Sentry](https://about.gitlab.com/direction/monitor/platform-insights/error_tracking/), which aggregates errors, surfaces them in the GitLab UI, and provides the tools to triage and respond to the critical ones.\n\nToday, our error tracking feature is at the [minimal level of maturity](https://about.gitlab.com/direction/monitor/platform-insights/error_tracking/), meaning we still have plenty of work to do before this feature is viable.\n\n\"The goal was to be able to provide error tracking as a product and bring these processes closer to the development delivery workflow,\" said [Sarah Waldner](/company/team/#sarahwaldner), senior product manager on the Monitor:Health team.\n\nThe product team summarized what needs to be done to move [error tracking at GitLab from minimal to viable](https://gitlab.com/groups/gitlab-org/-/epics/1625) as part of a detailed [parent epic](https://docs.gitlab.com/ee/user/group/epics/#multi-level-child-epics). The parent epic essentially establishes product priorities by defining which use cases error tracking needs to solve in order for the product to be considered a viable feature. The next step was to define the core problems that users encounter with error tracking and double-check the solutions that should be used to solve those problems.\n\n\"Once we came up with these problems and validated those, we moved into a solution validation cycle whereby designers came up with different solutions and flows for these and then we tested them with different users,\" says Sarah. 
\"After we did all of that and have all of our solutions validated we broke it down into four different things that someone needs to do from a high level with Sentry.\"\n\nThose top four actions were divided into child epics which roll-up to the parent epic, and include:\n\n*   [The instrumentation or configuration of Sentry](https://gitlab.com/groups/gitlab-org/-/epics/2036)\n*   [Correlating errors](https://gitlab.com/groups/gitlab-org/-/epics/2035)\n*   [Resolving errors](https://gitlab.com/groups/gitlab-org/-/epics/2034)\n*   [Triaging errors](https://gitlab.com/groups/gitlab-org/-/epics/2029)\n\nBy breaking down the problems and establishing solutions, the team took an important step toward establishing their product development priorities. Contained in each of these child epics are other epics and issues which break down the solutions into the larger aspects.\n\n## Establishing development priorities\n\nThe team recognized that, in order to boost error tracking to viable, there needed to be a better way to resolve errors that are surfaced by Sentry within GitLab. The team created an epic for [resolving errors](https://gitlab.com/groups/gitlab-org/-/epics/2034), and outlined some of the key development priorities.\n\n\"So, to resolve errors, if you have an error that you need to fix, you might want to create an issue to track that work, respond to it, and close that issue in the general workflow,\" says Sarah. 
\"So within the resolving errors workflow part of the error tracking parent epic, we pose the idea of being able to manually open an issue from a Sentry error, which was then broken down further into where you do it from, and further again on the error detail page.\"\n\n![Resolve errors epic](https://about.gitlab.com/images/blogimages/resolve_errors_epic.png){: .shadow.medium.center}\nThe workflow for the resolve errors epic is broken down into multiple child epics, which correlate to different development projects.\n{: .note.text-center}\n\nThe team decided that we needed the ability to [create an issue within GitLab based on the errors detected by Sentry](https://gitlab.com/groups/gitlab-org/-/epics/2210) and that they wanted this function and button to appear on both the error list page as well as on the [error detail page](https://gitlab.com/groups/gitlab-org/-/epics/2210). The team then decided to make the error detail page the first priority.\n\n\"Through conversation, we were able to determine what is the bare minimum of value and broke it down as best as we could from frontend to backend, with the idea that it's better to ship something small that's not fully complete than (to ship) nothing at all,\" says [Clement Ho](/company/team/#ClemMakesApps), frontend engineering manager on Monitor:Health.\n\n## The \"Create an Issue\" button in three iterations\n\n\"Being able to open an issue from the error detail page seems really simple, but once you talk through what that workflow actually looks like, there are a lot more aspects to it than previously thought,\" says Sarah.\n\n![Open issue workflow](https://about.gitlab.com/images/blogimages/open_issue_epic.png){: .shadow.medium.center}\nBreaking the frontend and backend engineering into iterations shows just how much work needs to be done to ship even one minor component of the error tracking product.\n{: .note.text-center}\n\n### The \"Create an Issue\" button in stages\n\nClement was the architect behind the 
`Create an Issue` button frontend iterations. He explained that he wanted to take advantage of GitLab deploying frequently, and so he broke down the development process for the `Create an Issue` button into a series of small steps.\n\nThe [first iteration](https://gitlab.com/gitlab-org/gitlab/issues/36537) was simply to build the ability to create an issue from the error detail page. In this iteration, the `Create an Issue` button was simple and unstyled and clicking it led the user to a blank issue. While not overly helpful at this phase, it represents a good start in allowing someone to respond to an error.\n\n![Create an Issue button](https://about.gitlab.com/images/blogimages/create_an_issue_it1.png){: .shadow.medium.center}\nWhat the `Create an Issue` button will look like when it's done.\n{: .note.text-center}\n\nIn the [second iteration](https://gitlab.com/gitlab-org/gitlab/issues/36540), the user clicks `Create an Issue` and the issue comes pre-filled with the Sentry error title, description, and link. It’s still not styled and consistent with GitLab UI yet, but it’s possible to see more of the error context when creating an issue in response to the error.\n\nIn the [third iteration](https://gitlab.com/gitlab-org/gitlab/issues/36542), the GitLab UI gets cleaned up and the issue comes with proper formatting.\n\n\"Now, we are three issues into this and each one has been done in a couple of days and after the first couple of days, someone was able to create an issue,\" says Sarah. 
\"And that way we got the system much faster instead of first adding the button and then adding the experience of the new issue and then having all of the information in there styled.\"\n\n### Is it better to start with frontend or backend engineering?\n\nAs Christopher noted in his [conversation with Sid](https://www.youtube.com/watch?v=tPTweQlBS54), everything that Clement was working on in the first three iterations was frontend-focused; typically engineers start problem-solving from the backend.\n\n\"I love frontend first. I love interface first also because it helps everyone think about it,\" says [Sid in to Christopher regarding this project](https://www.youtube.com/watch?v=tPTweQlBS54). \"If you have something in the interface it's easier to understand for customers, for backend people, etc. So in the end what the customer sees is the product. One way to develop is to start with the readme or start with the press release. After that, the closest thing you can think of is the interface. So I think it's much better to have an interface built and then do the backend than vice versa. Even though I come from backend engineering.\"\n\nJust a few days after Clement started building the frontend of the `Create an Issue` button the backend team started building support in separate issues. The main priority was to build backend support that associates issues to errors so that users are not creating multiple issues for the same error. 
The engineers also built frontend support so the user can see that an issue was already created and linked to a particular error.\n\n## The power of iterative thinking\n\n\"One huge thing that came out of this is all team members now feel empowered to create issues and to just add them to the milestone and if they realize something is too big, they can create followups or second iterations,\" says Sarah.\n\nWhile the end goal is to build a viable error tracking product, the big vision simply cannot be achieved without smaller, incremental steps. While it is clear that the engineering teams embraced iteration, Sarah and the product team also recognized the strong strategic value of iterative product development.\n\nAt the same time, Clement wanted to take advantage of GitLab’s frequent deployments, but he also realized that by breaking down the engineering process into MVCs he could also drive up [merge request rate](https://handbook.gitlab.com/handbook/engineering/development/performance-indicators/#mr-rate) on the Monitor:Health frontend engineering team (the average number of merge requests per engineer merged per month) which is a [KPI](https://handbook.gitlab.com/handbook/engineering/development/performance-indicators/#mr-rate).\n\n![MR rate increases](https://about.gitlab.com/images/blogimages/mrs.png){: .shadow.medium.center}\nThe data shows an increase in the rate of merge requests on the Monitor:Health frontend engineering team.\n{: .note.text-center}\n\nThe data speaks for itself, since breaking down the product development process for error tracking into smaller iterations, the MR rate for Clement’s team has increased. 🎉\n\n## Scoping down to speed things up\n\nClement says that one of his key takeaways from this iterative development process was that GitLab ought to embrace iteration on the engineering side, but also iteration in product development. 
He is encouraging his team to ship MVCs more frequently, and plans to check his work by running through the process a few more times to iron out any wrinkles in the workflow.\n\nWhile the highly iterative approach to error tracking has been lauded by everyone from the senior director of development to our very own CEO, Clement acknowledges that this is still a work-in-progress.\n\n\"I think the cost is communication and information being spread out everywhere,\" Clement says.\n\nHe advises teams looking to adopt this highly iterative approach be extra disciplined at consolidating conversation on specific epics and issues within GitLab, otherwise, communication can get unwieldy, fast.\n\nCover photo by Max Ostrozhinskiy on Unsplash.\n{: .note}\n",[9,3173,1339,683],{"slug":5830,"featured":6,"template":686},"iteration-on-error-tracking","content:en-us:blog:iteration-on-error-tracking.yml","Iteration On Error Tracking","en-us/blog/iteration-on-error-tracking.yml","en-us/blog/iteration-on-error-tracking",{"_path":5836,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5837,"content":5843,"config":5849,"_id":5851,"_type":14,"title":5852,"_source":16,"_file":5853,"_stem":5854,"_extension":19},"/en-us/blog/its-time-to-put-the-sec-in-devsecops",{"title":5838,"description":5839,"ogTitle":5838,"ogDescription":5839,"noIndex":6,"ogImage":5840,"ogUrl":5841,"ogSiteName":670,"ogType":671,"canonicalUrls":5841,"schema":5842},"It’s time to really put the Sec in DevSecOps","Organizations may tack on security to DevOps but unless they wholly integrate it, they will miss out on DevSecOps benefits.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671091/Blog/Hero%20Images/lock.jpg","https://about.gitlab.com/blog/its-time-to-put-the-sec-in-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It’s time to really put the Sec in DevSecOps\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Francis Ofungwu\"}],\n        \"datePublished\": \"2023-02-02\",\n      }",{"title":5838,"description":5839,"authors":5844,"heroImage":5840,"date":5846,"body":5847,"category":769,"tags":5848},[5845],"Francis Ofungwu","2023-02-02","\nWe all know that DevOps and security are intertwined. And a lot of lip service is paid to surface integrations between the two. But until your organization goes [all-in on a DevSecOps strategy](/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops/#understanding-devops-pain-points) – where Sec is wholly embedded with Dev and Ops, you will miss out on the benefits a holistic approach brings.\n\nToday, the friction between DevOps and security teams comes from objectives that, at first glance, seem diametrically opposed (spoiler alert: they aren’t). Developers want to create great products at the velocity the business requires, and security teams want to effectively manage risks using methodical frameworks that require some level of structure. Day-to-day collaboration between the two groups can be challenging because their workflows and incentives differ.\n\nIn [GitLab’s 2022 Global DevSecOps Survey](/developer-survey/), we found that developers are seeing security scanning increasing across all categories (SAST, DAST, container scanning, dependency scanning, and license compliance), but this uplift is not translating into vulnerability reduction, as 56% of respondents said it was difficult to get developers to actually prioritize fixing code.\n\nAnd so they stay in silos.\n\n## Separation between security and DevOps doesn’t work\n\nWe know through our experiences that security and DevOps often only come together in emergencies. When there is a high-risk incident, such as a breach, security and DevOps teams are forced together on endless incident calls that function more like a “get to know you” exercise driven by rudimentary questions: What does that app do? 
Why are you using that library with a vulnerability from 2010? What do you mean it’s not exploitable?\n\nWe can – and should – agree that emergencies are not the best time for this level of discovery. You wouldn’t want a firefighter asking if your building is up to code before they start putting out a fire. But due to the lack of frequent collaboration, development and security teams use incidents as the time to play catchup and really dig into the basics of the development lifecycle.\n\n## Sec is more than just a few letters between Dev and Ops\n\nConfusion in the industry hasn’t helped. The industry has come to recognize – and in some cases, exploit – the frustration of these silos. They will plop the “Sec” in between Dev and Ops and market a laundry list of point solutions that solve only a small portion of the problem, and leave DevOps and security teams with a [complex toolchain](/the-source/platform/devops-teams-want-to-shake-off-diy-toolchains-a-platform-is-the-answer/) to manage and maintain. The alarming rate of cyber attacks and breaches in the headlines makes it obvious this approach is not working. So what’s the issue?\n\nI liken where we are now to the challenges that the healthcare industry faced a decade ago in trying to convince physicians of the benefits of hand hygiene. At the time, in the U.S., healthcare-associated infections affected more than 2 million people every year, while compliance with required hygiene standards by healthcare workers was below 40%, [an article from that time period](https://www.hcinnovationgroup.com/home/blog/13020327/the-freakonomics-of-behavior-change-in-healthcare) states. A Los Angeles hospital, aiming to solve this problem, was requiring a 100% hygiene compliance rate among its physicians – should have been a simple task among a population that understands the poor outcomes related to noncompliant behavior, right? No. 
Several carrot-and-stick approaches to changing behavior of the physicians yielded mixed results.\n\nRelying on humans to change their behavior can be fruitless, the researchers found, according to the article: “Organizations should focus instead on innovations through technology or design.” In other words, we should not rely on behavior change from individuals to drive meaningful, long-lasting transformation. We need to use technology as the invisible hand that reinforces the right behavior and enacts course correction when we deviate from expected actions.\n\nThe corollary is that in the tech industry, we have evangelized for [security and DevOps to be together](https://about.gitlab.com/solutions/security-compliance/) and have talked about why it makes sense (improved software supply chain security, management of threat vectors, and adherence to compliance requirements, for example). When we share the [vision of DevSecOps](/topics/devsecops/) there are head nods and agreements that this unification is the right thing to do for the good of the business, but when it comes down to it, the actual implementation is lacking.\n\n## What it means to be fully DevSecOps\n\nDevSecOps has to be a practice in every sense of the word. It can’t be theory or an academic exercise. DevSecOps should be an implementation of cultural, organizational, and technical changes designed to optimize delivery and maintenance of software. 
Characteristics of DevSecOps will include:\n- Reducing the time required to deliver quality software.\n- Automating processes required to identify, categorize, and remediate software bugs.\n- Designing the culture and operations of dev, sec, and ops and unifying these functions through values and workflows.\n\nFor DevSecOps as a practice to work, all stakeholders involved in the design, development, and maintenance of software need to commit to transparent collaboration at scale.\n\nWhat this means in action:\n\n- Eliminating one-way communication of security requirements: controls should be programmatically enforced and consumable via APIs.\n- Implementing policy as code: For adoption and consistency, the desired cultural shift and expectations have to be programmatically enforced.\n- Creating a unified view of threats at every level of the development lifecycle: All stakeholders should have insight to the same information that details the quality of the code. Having separate security scanners only operated by the security team does not drive collaboration.\n- Supporting in-context training inside of the development process: Build better developers by offering near real time evidence of vulnerabilities in their environment and code.\n- Reducing the amount of time developers spend in painful audits by investing in immutable development artifacts that evidence use of controls throughout the lifecycle.\n\nAt GitLab, we believe we are strongly positioned to accelerate your organization’s DevSecOps transformation. 
Our platform helps unify DevSecOps teams and drive the cultural, process, and governance programs required to deliver value to organizations seeking a more effective and sustainable way to develop better, more secure software faster.\n\nCover image by [Georg Bommeli](https://unsplash.com/@calina?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/ybtUqjybcjE?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[875,9,479],{"slug":5850,"featured":6,"template":686},"its-time-to-put-the-sec-in-devsecops","content:en-us:blog:its-time-to-put-the-sec-in-devsecops.yml","Its Time To Put The Sec In Devsecops","en-us/blog/its-time-to-put-the-sec-in-devsecops.yml","en-us/blog/its-time-to-put-the-sec-in-devsecops",{"_path":5856,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5857,"content":5863,"config":5868,"_id":5870,"_type":14,"title":5871,"_source":16,"_file":5872,"_stem":5873,"_extension":19},"/en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment",{"title":5858,"description":5859,"ogTitle":5858,"ogDescription":5859,"noIndex":6,"ogImage":5860,"ogUrl":5861,"ogSiteName":670,"ogType":671,"canonicalUrls":5861,"schema":5862},"Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment","Learn how to migrate from Jenkins to the integrated CI/CD of the GitLab DevSecOps Platform to deliver high-quality software rapidly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663000/Blog/Hero%20Images/tanukilifecycle.png","https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-11-01\",\n      
}",{"title":5858,"description":5859,"authors":5864,"heroImage":5860,"date":5865,"body":5866,"category":769,"tags":5867},[2120],"2023-11-01","\nIn today's dynamic landscape of software development, certain requirements have become paramount for delivering high-quality software rapidly. These requirements include the need for cloud compatibility, faster development cycles, improved collaboration, containerization, enhanced development experiences, and the integration of AI-driven capabilities for better efficiency and speed. Jenkins, a longstanding and respected continuous integration (CI) tool, has admirably played a role in many teams' software development for years. However, as more teams adopt DevOps/DevSecOps strategies for their software delivery, leveraging the integrated CI that is available in a DevSecOps platform like GitLab can provide benefits that Jenkins does not. \n\nSome organizations find themselves hesitating to migrate, not because they doubt the benefits of a top-tier [CI/CD](https://about.gitlab.com/topics/ci-cd/) solution such as GitLab, but due to the complexities of their existing Jenkins implementations. It's understandable that such a transition can seem daunting. \n\nIn this blog, you'll find several migration strategies to help transition from Jenkins to GitLab and make the process smoother and more manageable.\n\n## Migrating to GitLab\nIt's become evident that for organizations seeking a CI/CD solution that can seamlessly support their evolving demands, GitLab emerges as a powerful game-changer. 
Let's explore why transitioning to this advanced platform is transformative for Jenkins users.\n\n### Why migrate to GitLab \nBefore we delve into the migration approaches, let's take a moment to understand GitLab CI and what makes it a compelling choice for modern CI/CD needs.\n\n> Try GitLab CI/CD today with [a free trial of Ultimate](https://gitlab.com/-/trials/new).\n\n### GitLab CI overview\nGitLab CI is an integral part of the GitLab [AI-powered](https://about.gitlab.com/gitlab-duo/) DevSecOps Platform, which offers a comprehensive and unified solution for DevSecOps and CI/CD. GitLab's design revolves around streamlining development workflows, fostering collaboration, enhancing security, and ensuring scalability.\n\n### Key features of GitLab CI\nThese are the key features of GitLab CI:\n- **Unified platform:** GitLab CI is more than just a CI/CD tool; it's part of a broader ecosystem that includes source code management, project management, security features, analytics and more. This unified platform streamlines workflows and enhances collaboration among development teams.\n- **Containerization and orchestration:** GitLab CI/CD is designed with containerization in mind, offering native support for Docker and Kubernetes. This enables seamless integration of container technologies into your CI/CD pipelines.\n- **Security by design:** Security is a top priority, and GitLab CI incorporates features such as static code analysis and vulnerability scanning to help teams identify and address security issues early in the development process.\n- **GitOps principles:** GitLab CI aligns with [GitOps principles](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/), emphasizing version-controlled, declarative configurations for infrastructure and application deployments. 
This approach enhances the reliability and repeatability of deployments.\n\nGet familiar with GitLab CI with this tutorial:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/WKR-7clknsA?si=T21Fe10Oa0rQ0SGB\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWith that understanding of GitLab CI's capabilities, let's explore the migration steps and strategies for Jenkins users looking to leverage the benefits of GitLab CI.\n\n## A recommended step-by-step Jenkins-to-GitLab CI migration\nWhen considering a migration from Jenkins to GitLab CI, we strongly recommend following a well-structured, step-by-step approach to ensure a seamless transition. Here's our recommended process:\n1. **Pipeline assessment:** Start by conducting a comprehensive inventory of all your existing pipelines in Jenkins. This initial step will help you gain a clear understanding of the scope and complexity of the migration.\n2. **Parallel migration:** Begin the migration process by selecting individual pipelines and moving them to GitLab CI one at a time. Continue to maintain the use of Jenkins for your ongoing work during this transition to minimize disruptions.\n3. **Code verification:** We advise beginning with verification checks in CI. Run both the Jenkins and GitLab CI pipelines in parallel. This dual approach allows you to directly compare the two workflows and identify any issues in the new GitLab workflows. During this phase, keep the GitLab workflow as an optional choice while Jenkins remains required.\n4. **Continuous validation:** After running both pipelines in parallel for a full iteration, thoroughly evaluate the outcomes from each pipeline. This evaluation should consider various factors, including status codes, logs, and performance. \n5. 
**GitLab CI transition:** As you gain confidence in the reliability and effectiveness of GitLab CI through the parallel runs, make the transition to the GitLab CI workflow as the required standard while Jenkins continues to operate in the background.\n6. **Jenkins phaseout:** After a second iteration, when you are confident in the performance and stability of GitLab CI, you can begin to remove the Jenkins job from your code verification pipeline. This successful transition will enable you to retire Jenkins from this particular aspect of your CI/CD process.\n\nThis recommended approach ensures that your migration is a gradual evolution, allowing you to identify and address any issues or discrepancies before fully committing to GitLab CI. Running Jenkins and GitLab CI pipelines in parallel provides valuable insights and ensures the effective streamlining of your CI/CD processes.\n\n## Preparing for migration: Training and communication\nTo ensure a smooth and successful migration from Jenkins to GitLab CI, follow these essential steps:\n- **Stakeholder communication:** Start by announcing your migration plans and timelines to all relevant stakeholders. This includes DevOps teams, developers, and QA engineers. 
Transparency in communication is crucial to ensure that everyone understands the objectives and expectations of the migration.\n- **Knowledge-level training:** Conduct knowledge-level training sessions for your teams to promote GitLab CI adoption.\nCover topics such as using GitLab CI, understanding the YAML syntax, and how to create a basic pipeline.\nProvide team members with the knowledge and skills necessary to navigate the new GitLab CI environment effectively.\n- **Hands-on learning:** Encourage hands-on learning by pairing up developers.\nCreate opportunities for them to learn from each other's experiences throughout the migration process.\n\nBy following these instructions for training and communication, you'll build a strong foundation for a successful migration, empowering your teams to adapt and thrive in the new environment.\n\n## 3 Jenkins-to-GitLab CI migration strategies\nThere are different strategies to consider. These three strategies offer flexibility, allowing organizations to choose the path that best aligns with their specific needs and resources. Let's explore these strategies in detail to help you make an informed decision about which one suits your organization best.\n\n### Migration Strategy 1: Using GitLab CI for new projects\nThe first migration strategy involves a gradual transition. While you maintain your existing Jenkins infrastructure for ongoing projects, you introduce GitLab CI for new projects. This approach allows you to harness the modern features of GitLab CI without disrupting your current work.\n\n#### Benefits of Migration Strategy 1\nThe benefits of this approach include the following:\n- New projects can leverage GitLab CI's advanced features right from the start. 
\n- This strategy minimizes the risk of disrupting existing workflows, as your existing Jenkins setup remains intact.\n- Your team can gradually adapt to GitLab CI, building confidence and expertise without the pressure of an immediate full-scale migration.\n\n#### Challenges of Migration Strategy 1\nThe challenges of this approach include the following:\n- Operating two CI/CD platforms simultaneously can introduce complexity, especially in terms of integration and team collaboration.\n- Managing projects on different platforms may require careful coordination to ensure consistency in processes and security practices.\n\nThis strategy offers a smooth and manageable transition by allowing you to harness GitLab CI's strengths for new projects, while your existing Jenkins infrastructure continues to support ongoing work.\n\n### Migration Strategy 2: Migrating only strategic projects\nIn this strategy, you identify specific projects within your organization that stand to benefit the most from the capabilities of GitLab CI. 
Instead of preparing for a wholesale migration, you start by focusing your efforts on migrating these strategically selected projects first.\n\n#### Benefits of Migration Strategy 2\nThe benefits of this approach include the following:\n- By concentrating on key projects, you can realize significant improvements in those areas where GitLab CI aligns with specific needs.\n- This approach reduces the complexity of migrating everything at once, minimizing the potential for disruptions.\n- You can gradually build confidence with GitLab CI and its benefits before considering further migrations.\n\n#### Challenges of Migration Strategy 2\nThe challenges of this approach include the following:\n- Even though you're not migrating all projects, the chosen projects' migration can still be intricate and require careful planning.\n- Ensuring seamless collaboration between projects on different platforms may require additional attention.\n\nThis strategy allows you to maximize the impact of GitLab CI by focusing on strategic areas, minimizing risk, and gradually gaining experience with the new tool.\n\n### Migration Strategy 3: Migrating everything\nThe third strategy is a comprehensive migration where you commit to moving all your CI/CD processes, projects, and workflows to GitLab CI. This approach aims for uniformity and simplification of CI/CD across all projects. This strategy can benefit from taking an iterative approach. Consider starting with new projects, followed by migrating strategic projects, and then leverage your growing knowledge and experience with GitLab CI to complete the migration of remaining projects. 
\n\n#### Benefits of Migration Strategy 3\nThe benefits of this approach include the following:\n- Uniform CI/CD processes across all projects can streamline administration and maintenance, reducing complexity.\n- You can take full advantage of GitLab CI's modern capabilities, from Infrastructure as Code to enhanced security features.\n- As your projects grow, GitLab CI is designed to handle increased demands, ensuring long-term scalability.\n\n#### Challenges of Migration Strategy 3\nThe challenges of this approach include the following:\n- A full-scale migration can be intricate, requiring meticulous planning and implementation.\n- The transition may disrupt ongoing projects and require a significant time investment.\n- Investment in training and potential tool migration expenses should be considered.\n\nOpt for this approach if uniformity and consolidation of CI/CD processes are a high priority, and you have the resources to execute a full migration.\n\nThe migration strategy you select should align with your organization's specific needs and circumstances. In all cases, the ultimate goal is to enhance your development process with modern CI/CD tools like GitLab CI, which offers scalability, infrastructure automation, security, and collaboration features that align with today's development needs.\n\n## Technical insights: How the migration works\nMoving your CI/CD workflows from Jenkins to GitLab CI is a transformative journey, and understanding how it works is vital for a successful transition.\n\n### Understanding the configurations: Jenkinsfile vs. .gitlab-ci.yml\nThe heart of your CI/CD pipeline lies in the configurations defined in your Jenkinsfile (for Jenkins) and .gitlab-ci.yml (for GitLab CI). 
While there are some similarities between these configuration files, there are notable differences as well.\n\n#### Similarities\n- Both files define the stages, jobs, and steps of your CI/CD process.\n- You specify the desired build, test, and deployment steps in both files.\n- Environment variables and settings can be configured in either file.\n\n#### Differences\n- Jenkinsfile uses Groovy for scripting, while .gitlab-ci.yml uses YAML. This change in language affects the way you write and structure your configurations.\n- The process of defining pipelines is more intuitive in .gitlab-ci.yml, with a cleaner, more human-readable syntax.\n- GitLab CI provides a wide range of built-in templates and predefined jobs, simplifying configuration and reducing the need for custom scripting.\n\n### Manually converting the pipeline configuration\nCurrently, migrating your existing Jenkins pipelines to GitLab CI is typically done manually. This means analyzing your Jenkinsfile and re-creating the equivalent configurations in .gitlab-ci.yml. While there are similarities in the concepts and structure, the differences in syntax and the specific capabilities of each platform require careful consideration during the migration.\n\n## Strategic planning for a smooth transition\nMigrating from Jenkins to GitLab CI requires meticulous planning to ensure a seamless transition. It's crucial to assess the disparities between the two systems and evaluate their impact on your workflow, considering aspects like security, cost, time, and capacity.\n\nOnce you've identified these differences and devised your migration strategy, break down the migration into key steps. These include setting up GitLab CI pipelines, securely transferring data from Jenkins to GitLab CI, and integrating GitLab CI into your existing tools and processes. 
\n\n## Case study: A seamless transition for Lockheed Martin\nLet's look at a real-world case study to illustrate the effectiveness of the \"Migrate Everything\" strategy. [Lockheed Martin](https://about.gitlab.com/customers/lockheed-martin/), the world’s largest defense contractor, had been using Jenkins for several years. As their project portfolio expanded, they realized that their Jenkins implementation with a wide variety of DevOps tools was becoming increasingly complex to manage. They were also eager to adopt modern CI/CD capabilities that Jenkins struggled to provide.\n\nIn collaboration with GitLab, Lockheed Martin decided to undertake a comprehensive migration to GitLab CI. Their goals included achieving consistency in their CI/CD processes, simplifying administration and maintenance, and taking full advantage of The GitLab Platform’s robust features.\n\nThe comprehensive migration strategy proved to be a resounding success for Lockheed Martin. With GitLab CI, they not only streamlined their CI/CD processes but achieved remarkable results. **They managed to run CI pipeline builds a staggering 80 times faster, retired thousands of Jenkins servers, and reduced the time spent on system maintenance by a staggering 90%. This monumental shift resulted in a significant increase in efficiency and productivity for Lockheed Martin.**\n\nThis case study showcases how a comprehensive migration strategy can be effective for organizations looking to leverage GitLab capabilities across all their projects.\n\nFor more in-depth insights into Lockheed Martin's successful transition to GitLab and how it streamlined their software development processes, check out [the detailed case study](https://about.gitlab.com/customers/lockheed-martin/).\n\n## GitLab documentation and support\nFor those embarking on this migration journey, GitLab offers documentation to guide you through the process. 
You can find valuable resources in GitLab's [official documentation](https://docs.gitlab.com/ee/ci/migration/jenkins.html).\n\nIn addition to documentation, GitLab's Professional Services team is available to assist organizations in their migrations. They bring expertise and experience to ensure a smooth transition. Whether it's understanding the nuances of Jenkinsfile to .gitlab-ci.yml conversion or optimizing your CI/CD workflows, their support can be invaluable.\n\n> Try GitLab CI/CD today with [a free trial of Ultimate](https://gitlab.com/-/trials/new).\n",[978,277,1181,231,2243,9],{"slug":5869,"featured":6,"template":686},"jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment","content:en-us:blog:jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment.yml","Jenkins Gitlab Ultimate Guide To Modernizing Cicd Environment","en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment.yml","en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment",{"_path":5875,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5876,"content":5881,"config":5886,"_id":5888,"_type":14,"title":5889,"_source":16,"_file":5890,"_stem":5891,"_extension":19},"/en-us/blog/jenkins-one-year-later",{"title":5877,"description":5878,"ogTitle":5877,"ogDescription":5878,"noIndex":6,"ogImage":1861,"ogUrl":5879,"ogSiteName":670,"ogType":671,"canonicalUrls":5879,"schema":5880},"Jenkins: One year later","With new acquisitions and the launch of CloudBees SDM, is Jenkins trying to become another all-in-one?","https://about.gitlab.com/blog/jenkins-one-year-later","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Jenkins: One year later\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-09-20\",\n      
}",{"title":5877,"description":5878,"authors":5882,"heroImage":1861,"date":5883,"body":5884,"category":679,"tags":5885},[788],"2019-09-20","\n\nIt’s been a little over a year since we wrote about [how GitLab CI compares with the three variants of Jenkins](/blog/how-gitlab-ci-compares-with-the-three-variants-of-jenkins/). How have things changed – and how much has stayed the same?\n\n## Acquisitions\n\nIn April 2019, [CloudBees acquired Electric Cloud](https://www.businesswire.com/news/home/20190418005393/en/CloudBees-Acquires-Market-Leader-Electric-Cloud-Creating), a market leader in continuous delivery. This acquisition brought application release automation, continuous delivery, and continuous deployment under the CloudBees umbrella through two of Electric Cloud’s premier products: ElectricFlow and ElectricAccelerator.\n\nThis acquisition came a little more than a year after [CloudBees acquired Codeship](https://techcrunch.com/2018/02/06/cloudbees-acquires-codeship-as-devops-consolidates/), another startup focused on continuous integration and delivery. These investments in continuous delivery tools are all about creating value. Because Jenkins doesn’t have continuous delivery built-in, it has to offer integrations with other tools (or acquire them) in order to offer that functionality. Acquisitions go a little deeper than just setting up an API, and are a lot more expensive. Could the acquisition of these two CD platforms give Jenkins the ability to offer CI/CD in their core product in the future?\n\n## Jenkins X\n\nThere has been a strong push by certain vendors to create a solution for combined CI/CD to match the capabilities of GitLab. GitHub developed GitHub Actions while CloudBees supported the development of Jenkins X, for example. Jenkins X was developed to automate continuous delivery pipelines to Kubernetes and cloud-native environments. 
[According to the Jenkins X website](https://jenkins-x.io/), “Rather than having to have deep knowledge of the internals of Jenkins X Pipeline, Jenkins X will default awesome pipelines for your projects that implement fully CI and CD.”\n\n## JenkinsWorld\n\nIn his [opening Keynote at JenkinsWorld 2018](https://www.youtube.com/watch?v=qE3tfS7k1VI&t=2s), CloudBees CTO Kohsuke Kawaguchi discussed some of the known unreliability of Jenkins and discussed how Cloud Native Jenkins could address some of these problems by removing the single point of failure and creating a more distributed system.\n\nAt JenkinsWorld 2019, [CloudBees offered an early preview of its CloudBees SDM Platform](https://www.businesswire.com/news/home/20190814005028/en/CloudBees-Presents-Software-Delivery-Management-SDM--).\n\nSource code management brings visibility and cross-functional collaboration into the SDLC, something that (until now) CloudBees could only offer through a plug-in. This new platform is a part of the CloudBees objective to be an end-to-end platform.\n\nWhat was most interesting was this quote from Sacha Labourey, CEO and co-founder of CloudBees:\n\n>“Organizations need a way to eliminate silos – to truly realize their vision of becoming software-first companies. This vision is Software Delivery Management and we are building the cohesive system our customers want. It will connect product stakeholders and development teams with the rest of the business, provide the intelligence and insights they all need to build software faster and provide increased value to their customers.”\n\nWe couldn’t agree more. ;)\n\n## A push for consolidation\n\nWith the acquisitions of Codeship and Electric Cloud, as well as the announcement of CloudBees SDM, it’s clear that CloudBees/Jenkins is pushing to be an end-to-end SDLC solution for its users. 
We’re seeing this throughout the industry: Idera purchasing Travis CI, Oracle acquiring Wercker, JFrog’s acquisition of Shippable, and the launch of GitHub Actions just last month. Either through acquisitions or adding new features, [the app development industry is in a push for consolidation](/blog/built-in-ci-cd-version-control-secret/).\n\nToolchains get in the way of organizations enabling faster software delivery and realizing their maximum business impact. Where CloudBees/Jenkins has faltered is in its instability, mainly due to the thousands of third-party plugins it supports and the maintenance headaches they cause. At GitLab, we enable SDM, packaging, delivery, monitoring, and security in the product itself without the plugins.\n\nBecause [transparency is one of our values](https://handbook.gitlab.com/handbook/values/), we proudly display other DevOps tools directly on our website with [head-to-head comparisons](/devops-tools/jenkins-vs-gitlab/) so that organizations can know which platform works best for their needs.\n\nCompetition makes everyone else better, and with CloudBees/Jenkins amping up their consolidation efforts, how does that compare to us as an already all-in-one platform? We invite you to join us for a demo so you see how GitLab CI/CD compares to Jenkins firsthand.\n\n[See demo of GitLab CI/CD vs. 
Jenkins](/blog/migrating-from-jenkins/)\n{: .alert .alert-gitlab-purple .text-center}\n",[109,9],{"slug":5887,"featured":6,"template":686},"jenkins-one-year-later","content:en-us:blog:jenkins-one-year-later.yml","Jenkins One Year Later","en-us/blog/jenkins-one-year-later.yml","en-us/blog/jenkins-one-year-later",{"_path":5893,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5894,"content":5900,"config":5906,"_id":5908,"_type":14,"title":5909,"_source":16,"_file":5910,"_stem":5911,"_extension":19},"/en-us/blog/journey-to-the-outer-loop",{"title":5895,"description":5896,"ogTitle":5895,"ogDescription":5896,"noIndex":6,"ogImage":5897,"ogUrl":5898,"ogSiteName":670,"ogType":671,"canonicalUrls":5898,"schema":5899},"Journey to the Outer Loop","A space journey from Earth to a world where Product Managers don't exist","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665839/Blog/Hero%20Images/devops.png","https://about.gitlab.com/blog/journey-to-the-outer-loop","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Journey to the Outer Loop\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kenny Johnston\"}],\n        \"datePublished\": \"2021-01-20\",\n      }",{"title":5895,"description":5896,"authors":5901,"heroImage":5897,"date":5903,"body":5904,"category":1359,"tags":5905},[5902],"Kenny Johnston","2021-01-20","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n🚀 Your DevOps journey, like any good space journey, spans vast distances. As your space captain, I’m here to remind you, we have a long way to go. DevOps by its continuous improvement definition isn’t a one stop destination. Today, in the true spirit of continuous improvement we’re rightly focused on our immediate next step along the journey. The next steps involve familiar friends - our fellow technical professionals who design, code, test, secure, deploy and operate software.  
On our space journey they are our local solar system - they’re Mercury, Venus, Earth, Mars, Jupiter and Saturn. These planetary neighbors are concentrating on improved feedback and collaboration amongst each other.  We’re optimizing the efficiency of software creation. We’ve even built complete teams to perform this integrated activity holistically. That is DevOps culture! \n\nThat is also just the beginning of DevOps. In DevOps we call this solar-system collaboration the Inner Loop. The important feedback loop that brings us confidence in the technical abilities of our software. Think of things like reliability, scalability, observability, velocity. Those are critical abilities to keep up with our competition, ship software faster and accelerate. This inner loop is about efficiency, are we using the right build strategy? Continuous improvement of our inner loop is possible today using collaboration concepts like continuous integration, continuous deployment, [DevSecOps](/topics/devsecops/) as well as pipelines, infrastructure and observability as code. \n\n![Inner Loop](https://about.gitlab.com/images/blogimages/2021-outer-loop-journey/inner_loop.png)\n\nThere is, somewhere beyond our solar-system, a galactic Outer Loop. In this Outer Loop we gain confidence in the value capabilities of our product. Imagine things like usability, adoptability, stickiness and competitiveness. This outer loop is about effectiveness, are we building the right things? These capabilities do more than just enable us to keep up with the competition and deliver value to our users - they are the measures of whether we are doing so. This is the next step in DevOps!\n\n![Outer Loop](https://about.gitlab.com/images/blogimages/2021-outer-loop-journey/outer_loop.png)\n\nI’m going to take you on a journey to this Outer Loop - what it looks like today, what constellations of tools are available for it, and what a more complete Outer Loop could look like. 
Buckle up!\n\n#### What it looks like today\n\nIn today’s mostly uncharted Outer Loop - the space is disorganized at best. Ask most modern software development organizations and they will tell you they are struggling to integrate tools to enable this feedback loop. An organization who has figured out their inner loop thanks to CI/CD  is now struggling with how to get feedback to the teams. Today DevOps teams are:\n* Using Progressive Delivery techniques such as canary deployments and feature flags to perform some A/B or hypothesis testing\n* Struggling to connect quantitative feedback in the form of user data into actionable insights from the loop\n* Collecting qualitative user feedback in the form of NPS scores, user research, feature requests in disparate silos with limited access\n* Filtering feedback through a single individual’s brain, typically a product manager, in order to process it for prioritization\n* Lacking understanding of the efficiency of the types of feedback they receive\n* Stitching together multiple tools to try to ensure feedback gets into the right hands\n\nSome of the tools for various parts of this outer loop are:\n* Collection - Survey Monkey, MiPanel, Heap, Chorus.ai, NPS, Support Tickets\n* Analysis - Qualtrics, Pendo.io, Gainsight\n* Insights - Dovetail\n* Prioritization - Aha!, Trello\n\n#### Constellation of Tools\n\nToday’s Outer Loop is a bit like an alien world. We don’t yet have the tools to organize and make sense of it. What would happen if we did?\n\nAn effective Outer Loop - one built for optimizing outcomes for our users - would easily allow for the kinds of activities sought by Product Managers. Activities like lean product delivery, hypothesis testing, and prioritization frameworks. In fact - if you gave these tools to the real value creators - engineers, testers, designers, operators - you wouldn’t need Product Managers! \n\nImagine, beyond our solar-system a world where there were no Product Managers. 
Feedback is collected and tools readily available for the creators to analyze it.  That feedback includes qualitative and quantitative feedback from users, buyers and competition not just about what the product does today - but what it could do tomorrow. The creators iterate on a prioritization framework that is continuously improved. They ship experiments and have tools which empower them to designate an experiment a success. If successful they can rapidly promote an experiment. The primary input - which should be evident already - is a business goal. \n\n#### Are we there yet?\n\nThe reality is, this future world isn’t far off. Product focused DevOps teams have these point solutions at their disposal today. What they are missing is many of the same qualities that teams struggled with in the Inner Loop: A common platform to perform these activities that didn’t reinforce silos and create significant effort to stitch disparate tools together.\n\nAs one of the leading Value Stream Delivery Platforms - this is exactly where GitLab is headed. We’re focused on completing the Galactic Outer Loop for you! Imagine a world where investment decision wasn’t subjective, ad-hoc, single tracked, and not data-driven! Imagine, no product managers! I’d invite you to [collaborate with us on that vision](https://gitlab.com/gitlab-org/gitlab/-/issues/299315). 
To Infinity and Beyond!\n",[9,1180],{"slug":5907,"featured":6,"template":686},"journey-to-the-outer-loop","content:en-us:blog:journey-to-the-outer-loop.yml","Journey To The Outer Loop","en-us/blog/journey-to-the-outer-loop.yml","en-us/blog/journey-to-the-outer-loop",{"_path":5913,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5914,"content":5919,"config":5925,"_id":5927,"_type":14,"title":5928,"_source":16,"_file":5929,"_stem":5930,"_extension":19},"/en-us/blog/just-commit-launch",{"title":5915,"description":5916,"ogTitle":5915,"ogDescription":5916,"noIndex":6,"ogImage":1193,"ogUrl":5917,"ogSiteName":670,"ogType":671,"canonicalUrls":5917,"schema":5918},"Let’s talk about commitment","What possibilities could you unlock by just making the choice, committing, and moving forward?","https://about.gitlab.com/blog/just-commit-launch","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Let’s talk about commitment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Todd Barr\"}],\n        \"datePublished\": \"2019-02-18\",\n      }",{"title":5915,"description":5916,"authors":5920,"heroImage":1193,"date":5922,"body":5923,"category":679,"tags":5924},[5921],"Todd Barr","2019-02-18","\n\nWe’re now solidly into 2019. Commitments you made to yourself, your health, your productivity, your career, your budget, or whatever the case may be – they’re probably becoming harder to keep. This pattern of making resolutions, being on our best behavior for a while, falling off the wagon, returning to our ways, then starting the whole process over in the new year is all too familiar.\n\nWith [50 percent of digital transformation efforts stalled in 2018](https://mktg.forrester.com/predictions-2019), you’ve likely experienced your own version of this at work, and are probably even somewhere in that cycle right now.\n\nThe thing is, commitment unlocks new potential. 
You often don’t get to the good stuff until you make that commitment – whether it’s committing to months of training and discipline, then experiencing the euphoria of completing your first marathon, or committing to your partner and building a life together.\n\nIn the software space, making that commitment can be the difference between paying lip service to DevOps transformation and actually realizing its promises. Making big changes, especially at an organizational level, is daunting. The trick is to commit to the process, not just to the goal. [Focusing on the processes and behaviors that support the goal is key to success](https://www.scienceofpeople.com/goal-setting/), so having a clear plan of attack rather than an abstract objective to achieve is what makes all the difference.\n\nHere at GitLab, we committed to being [all-remote](/company/culture/all-remote/) – allowing us to hire the best people, no matter where in the world they might be or at what times they choose to work. We went all in on [asynchronous communication](/handbook/communication/#internal-communication), conscientiously documenting everything so we could collaborate across time zones and borders. We committed to a monthly release cycle, a decision which has seen us ship, to date, 88 consecutive new releases, allowing us to work with a short feedback loop and make small adjustments and iterations along the way. It was our commitment to the process, to having a single vision and steadily marching toward it, that enabled us to build a single application for the entire DevOps lifecycle with an all-remote team.\n\nSo this is what we’re asking you to do! Just commit. To software modernization. To faster cycle times. To secure apps. And because commitment is easier when you have a plan, and accountability, we’re here to support you on the journey. Over the coming weeks, we will be rolling out a series of blog posts and guides to help you make meaningful, lasting change in your organization. 
From tips and success stories on how to modernize your application architecture, to finally getting on top of technical debt, and building more secure applications, we’re working with our experts, customers, and community to help you along the way.\n\nObviously, commit has a double meaning for us. Git unlocked a whole new way to collaborate on software with the humble commit. Now, at GitLab, committing unlocks a whole lot more value – faster time to market, more secure code, more modern applications. We’re asking you to just commit to these. [Are you up for the challenge?](/blog/application-modernization-best-practices/)\n\n## #JustCommit\nSo, you're committing to starting something new this year. Hooray! 🎉 It's always easier to stick to something with a buddy – tell us your commitments by tweeting us [@gitlab](https://twitter.com/gitlab) using #JustCommit, and we'll do our best to help (and enter you into our swag giveaway)! The [giveaway](/community/sweepstakes/) lasts through April, but we want to keep you committing all year long.\n",[9,1789,728],{"slug":5926,"featured":6,"template":686},"just-commit-launch","content:en-us:blog:just-commit-launch.yml","Just Commit Launch","en-us/blog/just-commit-launch.yml","en-us/blog/just-commit-launch",{"_path":5932,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5933,"content":5939,"config":5945,"_id":5947,"_type":14,"title":5948,"_source":16,"_file":5949,"_stem":5950,"_extension":19},"/en-us/blog/keeping-your-development-dry",{"title":5934,"description":5935,"ogTitle":5934,"ogDescription":5935,"noIndex":6,"ogImage":5936,"ogUrl":5937,"ogSiteName":670,"ogType":671,"canonicalUrls":5937,"schema":5938},"DRY development: A cheatsheet on reusability throughout GitLab","How to follow the DevOps principle of 'don't repeat yourself' to optimize CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683555/Blog/Hero%20Images/drylights.jpg","https://about.gitlab.com/blog/keeping-your-development-dry","\n           
             {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DRY development: A cheatsheet on reusability throughout GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Joe Randazzo\"}],\n        \"datePublished\": \"2023-01-03\",\n      }",{"title":5934,"description":5935,"authors":5940,"heroImage":5936,"date":5942,"body":5943,"category":791,"tags":5944},[5941,5272],"Noah Ing","2023-01-03","\nMore than 20 years ago, the book [The Pragmatic Programmer](https://pragprog.com/titles/tpp20/the-pragmatic-programmer-20th-anniversary-edition/) brought attention to the DRY principle, or “Don’t Repeat Yourself.\" This principle is defined as every piece of knowledge must have a single, unambiguous, authoritative representation within a system.\n\nThe main problem to solve here is minimizing duplication. As a development project is bombarded with new requests or changing requirements, DevOps teams must balance between development of net-new features or maintaining existing code. The important part is how to reduce duplicate knowledge across projects.\n\nThis tutorial explores the mechanisms throughout GitLab that leverage the DRY principle to cut down on code duplication and standardize on knowledge. To see working examples of reusability in action, take a look at this [repository](https://gitlab.com/guided-explorations/gitlab-ci-yml-tips-tricks-and-hacks/dry-repository-a-cheatsheet).\n\n## Minimizing duplication in CI/CD\n\n### include\n[`include`](https://docs.gitlab.com/ee/ci/yaml/index.html#include) can be used to transform a single .gitlab-ci.yml file into multiple files to improve readability and minimize duplication. For example, testing, security, or deployment workflows can be broken out into separate templates. 
This also allows [ownership](https://docs.gitlab.com/ee/user/project/codeowners/) of the files.\n\n\n```yaml\ninclude:\n  - template: CI/Build.gitlab-ci.yml\n  - template: CI/Test.gitlab-ci.yml\n  - template: CI/Security.gitlab-ci.yml\n  - template: CD/Deploy.gitlab-ci.yml\n\n```\n\n### YAML anchors\n[YAML anchors](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#anchors) can be used to reduce repeat syntax and extend blocks of CI workflow, including jobs, variables, and scripts.\n\n```yaml\n.test_template: &test_suite\n  image: ruby:2.6\n\nunit_test:\n  \u003C\u003C: *test_suite\n  script:\n    - echo \"Running a test here\"\n\nend_to_end_test:\n  \u003C\u003C: *test_suite\n  script:\n    - echo \"Running a test here\"\n\nsmoke_test:\n  \u003C\u003C: *test_suite\n  script:\n    - echo \"Running a test here\"\n```\n\n### extends\n[`extends`](https://docs.gitlab.com/ee/ci/yaml/index.html#extends) is similar to anchors with additional flexibility and readability. The major difference is it can be used with `includes`.\n\n```yaml\n\n.prepare_deploy:\n  stage: deploy\n  script:\n    - echo \"I am preparing the deploy\"\n  only:\n    - main\n\ndeploy_to_dev:\n  extends: .prepare_deploy\n  script:\n    - echo \"Deploy to dev environment\"\n  environment: dev\n\ndeploy_to_production:\n  extends: .prepare_deploy\n  script:\n    - echo \"Deploy to production environment\"\n  when: manual\n  environment: production\n```\n\n### !reference\n[`!reference`](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#reference-tags) enables the selection of keyword configuration from other job sections and reuse in the current session.\n\n```yaml\n.vars:\n  variables:\n    DEV_URL: \"http://dev-url.com\"\n    STAGING_URL: \"http://staging-url.com\"\n\n.setup_env:\n  script:\n    - echo \"Creating Environment\"\n\n.teardown_env:\n  after_script:\n    - echo \"Deleting Environment\"\n\nintegration_test:\n  variables: !reference [.vars, variables, DEV_URL]\n  script:\n    - 
!reference [.setup_env, script]\n    - echo \"Run Test\"\n  after_script:\n    - !reference [.teardown_env, after_script]\n\nperformance_test:\n  variables: !reference [.vars, variables]\n  script:\n    - !reference [.setup_env, script]\n    - echo \"Run Test\"\n  after_script:\n    - !reference [.teardown_env, after_script]\n```\n\n### Downstream pipelines\n[Downstream pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) enable the breakout of microservices and their pipelines. A .gitlab-ci.yml file can be used for each service, and when a file or directory is changed, only that pipeline needs to be triggered improving the awareness and readability of what’s deploying.\n\n```yaml\nui:\n  trigger:\n    include: ui/.gitlab-ci.yml\n    strategy: depend\n  rules:\n    - changes: [ui/*]\n\nbackend:\n  trigger:\n    include: backend/.gitlab-ci.yml\n    strategy: depend\n  rules:\n    - changes: [backend/*]\n```\n\n![Dynamic child pipeline](https://about.gitlab.com/images/blogimages/2022-02-01-parent-child-vs-multi-project-pipelines/parent-child.png){: .shadow}\n\n### CI/CD variables\n[CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) can be scoped to a specific level, including the project, group, instance level, or .gitlab-ci.yml level. The values can be stored and reused across a group for project inheritance or overwritten at the project level.\n\n```yaml\nvariables:\n  PROJECT_LEVEL_VARIABLES: \"I am first in line in precedence\"\n  GROUP_LEVEL_VARIABLES: \"I am second in line\"\n  INSTANCE_LEVEL_VARIABLES: \"I am in third place\"\n  GITLAB_CI_YML_LEVEL_VARIABLES: \"I am last in line of precedence\"\n\n```\n\n## Creating consistent code reviews across multiple teams\n\n### Description templates\n[Description templates](https://docs.gitlab.com/ee/user/project/description_templates.html) enable teams to define a consistent workflow for issues or merge requests. 
For example, the MR template can define a checklist for rolling out to a feature to ensure it’s documented, quality tested, and reviewed by appropriate team members. Here are [MR templates](https://gitlab.com/gitlab-org/gitlab/-/tree/master/.gitlab/merge_request_templates) that GitLab team members use daily.\n\n```md\n\u003C!-- These templates can be set at the instance or group level to share amongst the organization: https://docs.gitlab.com/ee/user/project/description_templates.html#set-instance-level-description-templates -->\n\n## What does this MR do?\n\n\u003C!-- Briefly describe what this MR is about. -->\n\n## Related issues\n\n\u003C!-- Link related issues below. -->\n\n## Create a checklist for the author or reviewer\n- [ ] Optional. Consider taking this writing course before publishing a change.\n- [ ] Follow the documentation process stated here.\n- [ ] Tag this user group if this applies.\n\n\n\u003C!-- Quick Actions - See https://docs.gitlab.com/ee/user/project/quick_actions.html#issues-merge-requests-and-epics for a list of all the quick actions available. -->\n\n\u003C!-- Add a label to assign a specific workflow using scoped labels -->\n/label ~documentation ~\"type::maintenance\" ~\"docs::improvement\" ~\"maintenance::refactor\"\n\n\u003C!-- Apply draft format automatically -->\n/draft\n\n\u003C!-- Assign myself or a usergroup -->\n/assign me\n```\n\n### Project templates\n[Project templates](https://docs.gitlab.com/ee/user/group/custom_project_templates.html) can be used to define an initial project structure for when new services are being developed. This gives a consistent starting point for projects that come equipped with the latest file configurations and defaults.\n\n### File templates\n[File templates](https://docs.gitlab.com/ee/administration/settings/instance_template_repository.html) are similar to project templates but are default files to choose from when adding a new file to your repository. 
The team then can quickly choose from files that have best practices baked in and organization defaults.\n\n## Defining a Pipeline Center of Excellence project for CI/CD workflows\n\nAs you 'productionize' your CI/CD workflows, it’s recommended to create a “Pipeline Center of Excellence” project that contains templates, containers, or other abstracted constructs that can be adopted throughout the organization. This project contains file or CI/CD templates that have the best practices or well-formed workflows defined for development teams to quickly adopt (includes) without recreating the wheel. To explore this in practice, visit [Pipeline COE](https://gitlab-org.gitlab.io/professional-services-automation/pipelinecoe/pipeline-templates/#/) documentation written by the GitLab Professional Services team.\n\nHave a reusable component to suggest or that we missed? Add a comment to this blog post or suggest a change to this file!\n\n## Related posts\n- [How to keep up with CI/CD best practices](https://about.gitlab.com/blog/how-to-keep-up-with-ci-cd-best-practices/)\n- [How to become more productive with GitLab CI](https://about.gitlab.com/blog/how-to-become-more-productive-with-gitlab-ci/)\n- [A visual guide to GitLab CI/CD caching](https://about.gitlab.com/blog/a-visual-guide-to-gitlab-ci-caching/)\n\nCover image by [Federico Beccari](https://unsplash.com/@federize?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com).\n",[976,977,9,2243],{"slug":5946,"featured":6,"template":686},"keeping-your-development-dry","content:en-us:blog:keeping-your-development-dry.yml","Keeping Your Development 
Dry","en-us/blog/keeping-your-development-dry.yml","en-us/blog/keeping-your-development-dry",{"_path":5952,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5953,"content":5959,"config":5964,"_id":5966,"_type":14,"title":5967,"_source":16,"_file":5968,"_stem":5969,"_extension":19},"/en-us/blog/key-organizational-models-for-devops-teams",{"title":5954,"description":5955,"ogTitle":5954,"ogDescription":5955,"noIndex":6,"ogImage":5956,"ogUrl":5957,"ogSiteName":670,"ogType":671,"canonicalUrls":5957,"schema":5958},"5 key organizational models for DevOps teams","DevOps teams can be organized in multiple ways. Identify the one that fits your organization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667194/Blog/Hero%20Images/2020-11-19-integration-management-header.jpg","https://about.gitlab.com/blog/key-organizational-models-for-devops-teams","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 key organizational models for DevOps teams\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Johanna Ambrosio\"}],\n        \"datePublished\": \"2022-03-08\",\n      }",{"title":5954,"description":5955,"authors":5960,"heroImage":5956,"date":5961,"body":5962,"category":679,"tags":5963},[4963],"2022-03-08","\nIf you’re just getting started with DevOps, there are several team organizational models to consider.\n\nA few key points to keep in mind as you design your team structure:\n\n- The organizational model you start with should change as you add more people, [different DevOps roles](/blog/how-to-build-out-your-devops-team/), and more projects. 
Expect to keep iterating as you go.\n\n- The ultimate goal of DevOps is to spread the message, tools, and processes throughout the company so that, eventually, everyone is working “the DevOps way.” At some point, if your approach is successful, DevOps as a separate group will disappear.\n\n- The model you begin with should depend on how many projects or products you’re working on, the size of your teams, and the size of your company. \n\n- Keep your team size small, with three to eight people max. Some experts say up to 12 is OK, but that’s a bit large for the [“two-pizza” rule](https://landing.directorpoint.com/blog/amazon-two-pizza-rule/). \n\n## Why building a DevOps team is important\n\nEven though DevOps is arguably the most efficient way to get software out the door, no one actually ever said it’s easy. So building the right DevOps team is a critical step in the process. \n\nThe right DevOps team will serve as the backbone of the entire effort and will model what success looks like to the rest of the organization. There is no “one size fits all” however – each team will be different depending on needs and resources.\n\n## 5 examples of DevOps team models\n\nHere are five DevOps organizational models to consider as you get going, according to Matthew Skelton and Manuel Pais, experts who wrote a book called Team Topologies about this topic and then updated the book with a [related microsite](https://web.devopstopologies.com). Their work is a must-read for anyone who’s trying to figure out which DevOps structure is best for their company.\n\n### **1. Dev and ops co-exist, with a “DevOps” group in between**\n\nThis can be a good interim strategy until you can build out a full DevOps program. The DevOps team translates between the two groups, which pretty much stay in place as they currently are, and DevOps facilitates all work on a project. \n\nJust don’t keep this structure in place too long. 
You don’t want to reinforce the separate silos as they currently exist for any longer than absolutely necessary.\n\n### **2. Dev and ops groups remain separate organizationally but on equal footing**\n \nThis is also a reasonable place to start: Everyone collaborates but can specialize where needed. Common tools will go a long way to helping facilitate good communication. In this model, several dev teams can be working on different products or services. \n\nMake sure teams communicate regularly. Invite a rep from each camp to the other’s meetings, for instance. And appoint a liaison to the rest of the company to make sure executives and line-of-business leaders know how DevOps is going, and so dev and ops can be part of conversations about the top corporate priorities.\n\n### **3. Create one team, maybe “no ops”?**\n\nIn this model, a single team has shared goals with no separate functions. The reason it’s called [“no ops”](https://searchitoperations.techtarget.com/definition/NoOps) is because ops is so automated it’s like it doesn’t actually exist. \n\nThis level of automation is so “aspirational” that many experts express caution about this approach. To eliminate any hands-on tasks, teams would need extensive machine learning and artificial intelligence solutions, and a flat, streamlined organization that prioritizes communication and workflow. TL;DR: [NoOps may not ever be a reality](https://www.cio.com/article/220351/what-is-noops-the-quest-for-fully-automated-it-operations.html).\n\nHowever, don’t use this as an excuse to do away with the ops team. You are going to need those folks. Devs can’t do it all.\n\n### **4. Ops as infrastructure consultants**\n\nThis model works best for companies with a traditional IT group that has multiple projects and includes ops pros. It’s also good for those using a lot of cloud services or expecting to do so. 
\n\nHere, ops acts as an internal consultant to create scalable web services and cloud compute capacity, a sort of mini-web services provider. In our [2021 Global DevSecOps Survey](/developer-survey/), a plurality of ops pros told us this is _exactly_ how their jobs are evolving — out of wrestling toolchains and into ownership of the team’s cloud computing efforts. Dev teams continue to do their work, with DevOps specialists within the dev group responsible for metrics, monitoring, and communicating with the ops team.\n\n### **5. DevOps-as-a-service**\n\nYou may decide your organization just doesn’t have the internal expertise or resources to create your own DevOps initiative, so you should hire an outside firm or consultancy to get started. This [DevOps-as-a-service (DaaS) model](https://medium.com/swlh/pros-and-cons-of-devops-as-a-service-a40b8796533c) is especially helpful for small companies with limited in-house IT skills.\n\nUsing DaaS in the short term offers another advantage: the opportunity to learn from your outsourcer how to eventually create your own internal DevOps team.\n\nMake sure you understand the outsourcer’s security landscape and your own responsibilities in this area, as you would with any outside firm. The difference here is that the team, processes, and software the outsourcer plans to use will be deeply embedded in your company’s infrastructure — it’s not something you can easily switch from. Also ensure that the outsourcer’s tools will work with what you already have in-house.\n\nFinally, keep a keen eye on costs and understand how the outsourcer will charge for its services.\n\n## Other organizational DevOps schemes include:\n\nA two-tier model, with a business systems team responsible for the end-to-end product cycle and platform teams that manage the underlying hardware, software, and other infrastructure. \nDevOps and SRE groups are separate, with DevOps part of the dev team and Site Reliability Engineers part of ops. 
This model requires a mature operations and development culture. \n\nWhichever organization model you choose, remember the idea of DevOps is to break down silos, not create new ones. Constantly reevaluate what’s working, what’s not, and how to deliver most effectively what your customers need.\n\n## Key characteristics of a successful DevOps team\n\nHere are some key charecteristics that you can expect to find in a well running DevOps team:\n\n* **Collaboration.** A DevOps team may have as few as 2 members to as many as 12 or more. \n* **Communication.** Nothing creates more bottlenecks on a team than members who don’t talk to each other, and DevOps projects always have a million moving parts. Document progress in a project thread, have regular meeting syncs or check in via Slack to keep team members up to speed and discuss any hurdles to avoid burnout or major delays. \n* **Team autonomy.** Work together, but also be able to work alone.\n* **Willingness to iterate.** Nothing will be perfect the first time, or even the second. In fact, a lot of DevOps work is just about making continuous, as-needed improvements to existing work, or replacing something that is no longer working for the original purpose. Keep on iterating!\n* **Fast feedback, high empathy and trust.** DevOps can feel like a whirlwind. Be mindful and respectful of the difficulties your teammates may be dealing with, be ready to give and receive feedback quickly, and trust each other for an optimal outcome and pleasant work environment.\n\n## Getting started with DevOps\n\nThere are a few steps to follow in order to get started on the planning and development of your DevOps team. Here are some pointers:\n\n**1. Create a roadmap.** Start with the basic goals, add in wish list items, and write it all out attaching a timeframe as needed. The map should include a list of action items broken down by priority and who is responsible for completing each step.\n\n**2. 
Ensure buy-in, and maybe add a champion.** Evangelize DevOps to the entire organization. Some teams find having a dedicated DevOps champion can help. \n\n**3. Select the solution.** Consider the budget, needs, and knowledge levels to make the best technology choices for the team.\n\n**4. Automate processes where appropriate.** DevOps doesn’t work without automation and for many teams, automation is the top priority. Look at areas where you can reduce manual work.\n\n**5. Set up monitoring.** Have a process for monitoring security, metrics, and everything in between.\nTrack progress. Always be able to give stakeholders a status update.\n\n_Johanna Ambrosio is a technology writer in the greater Boston area._\n",[9,749,2535],{"slug":5965,"featured":6,"template":686},"key-organizational-models-for-devops-teams","content:en-us:blog:key-organizational-models-for-devops-teams.yml","Key Organizational Models For Devops Teams","en-us/blog/key-organizational-models-for-devops-teams.yml","en-us/blog/key-organizational-models-for-devops-teams",{"_path":5971,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5972,"content":5978,"config":5985,"_id":5987,"_type":14,"title":5988,"_source":16,"_file":5989,"_stem":5990,"_extension":19},"/en-us/blog/kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow",{"title":5973,"description":5974,"ogTitle":5973,"ogDescription":5974,"noIndex":6,"ogImage":5975,"ogUrl":5976,"ogSiteName":670,"ogType":671,"canonicalUrls":5976,"schema":5977},"Integrating vulnerability education into DevOps workflows","Interactive training labs are now available within the GitLab platform from Kontra Application Security, a ThriveDX company.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668199/Blog/Hero%20Images/KontraCover.png","https://about.gitlab.com/blog/kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        
\"@type\": \"Article\",\n        \"headline\": \"Kontra and GitLab integrate vulnerability education into the DevOps workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gyan Chawdhary\"}],\n        \"datePublished\": \"2022-03-31\",\n      }",{"title":5979,"description":5974,"authors":5980,"heroImage":5975,"date":5982,"body":5983,"category":769,"tags":5984},"Kontra and GitLab integrate vulnerability education into the DevOps workflow",[5981],"Gyan Chawdhary","2022-03-31","\n\nInteractive training labs are now available within the GitLab DevOps platform from Kontra Application Security, a ThriveDX company. This integration allows GitLab users to access Kontra’s interactive security training modules from the familiar Merge Request (MR) and pipeline experiences to quickly learn about and fix vulnerabilities reported through automated security scans.\n\nKontra’s content is also available in GitLab’s vulnerability management features, providing the same easy access to training on vulnerabilities identified from these same security scans, as well as other sources such as penetration tests or bug bounty programs. By putting interactivity into our learning simulations, we put the developer first, helping them to understand the risk and impact of a vulnerability from an attacker's perspective.\n\n## So, what is Kontra?\n\nKontra is a scalable Application Security Training platform powered by ThriveDX. This training application was built for modern development teams and it aims to give developers the most advanced security simulations for the best quality training. Kontra works by creating short educational sessions of real-life security incidents to give developers the necessary skills to build and maintain secure application code. \n\nBy going through a simulated security scenario, developers gain better insight into how to get ahead of would-be cyber attackers. 
\n\n## The benefits of interactive developer security education\n\nAs enterprise developers become increasingly responsible for the security and integrity of their applications, they require relevant, actionable, and engaging security education that enables them to:\n\n- quickly understand and resolve security vulnerabilities\n- design controls to proactively prevent security issues\n- confidently communicate and assign security issues within engineering teams\n\nUnfortunately, these skills are almost never taught in academic courses or coding bootcamps. To address this gap, enterprise software developers often undergo annual developer security training, which typically involves consuming a PowerPoint presentation or watching a recorded presentation on software vulnerabilities and issues. The problem with this style of training is that it lacks actionable explanations, is too passive, or contains generic content that doesn't resonate with developers and engineers.\n\nKontra’s short training sessions are designed to be played in less than five minutes, ensuring that the correct explanations are provided to the developer to fully understand the security impact of a reported vulnerability and how to address it. The short sessions also make it easier to apply the security fix to the code.\n\n## The elements of interactive training\n\nThe most important aspect of training and education is how you convey and communicate ideas visually. This requires strong visual design, empathy, aesthetics, and communication with the learner. Kontra’s interactive training tutorials are offered in multiple programming languages and frameworks, ensuring each lesson is relevant to the developer.\n\nKontra’s learning environment consists of many different interactive UI elements which, depending on a specific vulnerability, are dynamically shown to the learner, ensuring that both the context and the impact of a vulnerability are demonstrated. 
\n\n![Kontra learning console](https://about.gitlab.com/images/blogimages/Kontra1.png){: .shadow}\n\n## How developers experience the vulnerability education integration\n\nTo have the highest impact, training is placed prominently, yet unobtrusively, where developers spend time: in MRs and pipelines. Developers can view vulnerabilities found by automated security scans in a dedicated MR security widget as well as a pipeline security tab. Clicking on a vulnerability shows its details such as a description and any identifiers such as a [Common Vulnerabilities and Exposures (CVE)](https://cve.mitre.org/) or [Common Weakness Enumeration (CWE)](https://cwe.mitre.org/). Once enabled, GitLab can now place a link to a relevant training from Kontra right in this details view. The identifier is used to dynamically locate the relevant content. And for security professionals, the same training content is available when viewing vulnerability details pages from GitLab’s Vulnerability Reports. \n\n## How to install and configure Kontra training\n\nKontra’s training is available to all [GitLab Ultimate](/pricing/ultimate/) customers. Simply [enable it](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#enable-security-training-for-vulnerabilities) for any desired projects.\n\n\n![Kontra security configuration](https://about.gitlab.com/images/blogimages/Kontra3.png){: .shadow}\n\nThen, look at the results from a [GitLab security scan](https://docs.gitlab.com/ee/user/application_security/#security-scanning-tools) (or one of GitLab’s [integration partners](/partners/technology-partners/#security)) in an MR, pipeline security tab, or a vulnerability details page. When you open a vulnerability record, you will see a direct link to training. 
GitLab will pull a training from Kontra that most closely matches the particular security issue and the specific language or framework in which it was detected.\n\n![Kontra predictable pseudorandom number generator](https://about.gitlab.com/images/blogimages/Kontra2.png){: .shadow}\n\n_Chawdhary is head of application security at ThriveDX SaaS._\n",[231,875,9],{"slug":5986,"featured":6,"template":686},"kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow","content:en-us:blog:kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow.yml","Kontra And Gitlab Integrate Vulnerability Education Into The Devops Workflow","en-us/blog/kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow.yml","en-us/blog/kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow",{"_path":5992,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5993,"content":5999,"config":6004,"_id":6006,"_type":14,"title":6007,"_source":16,"_file":6008,"_stem":6009,"_extension":19},"/en-us/blog/kubernetes-chat-with-joe-beda",{"title":5994,"description":5995,"ogTitle":5994,"ogDescription":5995,"noIndex":6,"ogImage":5996,"ogUrl":5997,"ogSiteName":670,"ogType":671,"canonicalUrls":5997,"schema":5998},"Kubernetes and the open source community: We chat with Joe Beda","Our CEO sits down with Kubernetes co-creator Joe Beda to talk about the future of open source.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680604/Blog/Hero%20Images/tech-explorers-cover.png","https://about.gitlab.com/blog/kubernetes-chat-with-joe-beda","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Kubernetes and the open source community: We chat with Joe Beda\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-05-20\",\n      
}",{"title":5994,"description":5995,"authors":6000,"heroImage":5996,"date":6001,"body":6002,"category":1318,"tags":6003},[788],"2019-05-20","\n\nJoe Beda is the Principal Engineer at VMWare and co-creator of Kubernetes. Beda and Craig McLuckie’s Google project to build a container orchestration tool has exploded and Kubernetes is now a large, open source community with thousands actively contributing to the project thanks to the [Cloud Native Computing Foundation](https://cncf.io/). In the world of open source they don’t get much better than Joe Beda, which is why we were thrilled to speak with him as part of our TechExplorers series where we sit down with the industry’s tech leaders.\n\nJoe and GitLab CEO [Sid Sijbrandij](/company/team/#sytses) went over a variety of topics like cloud native, Kubernetes, the business of open source, and many others. What was most interesting, but not surprising, was the integral role the open source community had in the success of these projects.\n\n“I think open source is evolving… It’s never been something that’s sat still. One of the lessons from Kubernetes more than anything else is that open source today is about community, if not more than code,” Beda says. He admits that right now is a tumultuous time for open source, with the line between product and project getting blurred. The “business” of open source can sometimes alienate the community that supported these initiatives in the first place, something many leaders will have to navigate in the years ahead.\n\n“It’s like there’s the code and the license for the code, and then there’s the community that builds around it. And even if it’s not a legal contract, I think there’s a social contract between the leaders of an open source project and the people who are members of that community. 
And I think you have to be very respectful of that social contract.”\n\nOne of the most important things an open source project can do to maintain the trust of the community, according to Beda, is to be very explicit about its motivations from the beginning. At GitLab, we’ve taken this message to heart and have [our promises to the open source community](/company/stewardship/) public on our website.\n\nKubernetes has already made a major impact on the way we deploy applications, and users continue to contribute and add to the project. “I think I’m still blown away with just the diversity of the projects that are building on top of Kubernetes,” he says. Even with recent challenges, Beda’s encouraged at the innovation he continues to see in open source. It all boils down to buy-in from the community and giving them the tools to keep innovating. “I think this is part of the excitement... There is a really vibrant set of projects that are experimenting, trying things out. And it’s going to be the users who decide what’s successful here.”\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/6IlyxHFedpo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nVideo directed and produced by [Aricka Flowers](/company/team/#arickaflowers)\n{: .note}\n\n\n## Takeaways\n\n\n### On the future of open source:\n\n>“I think open source is evolving… It’s never been something that’s sat still. 
One of the lessons from Kubernetes more than anything else is that open source today is about community, if not more than code.”\n\n\n### On building an open source company:\n\n>“My advice to anybody who is building a company around open source is to understand sort of where are your levers, where is the value that you’re adding, and try and be creative about finding ways to add value where something like this can’t happen.”\n\n\n### On the early days of Kubernetes:\n\n>“The real story is that there was a set of us that just wanted to be able to hack on some stuff and not have to go through all the process of shipping stuff to Google… But also we very much had the idea from the start that we wanted to build a community. We wanted to enable other people to own it, to be part of it, to really feel like they were instrumental in making it happen. And that’s what happened.”\n\n\n### On enterprise cloud adoption:\n\n>“I think that as we start to see these enterprises start to adopt cloud, understanding the power dynamics and the relationship with cloud, I think that there is a lot of concern about how do I get some independent advice, independent thought, independent support that’s going to actually stay with me as I figure out where my position lands as I move from on-prem to cloud and beyond.”\n\nWe’ll be at KubeCon Barcelona May 20 – 23, booth #S21. 
Learn how you can get started with GitLab and Kubernetes, and be sure to check out Joe Beda’s keynote on May 21.\n",[9,1041,1477],{"slug":6005,"featured":6,"template":686},"kubernetes-chat-with-joe-beda","content:en-us:blog:kubernetes-chat-with-joe-beda.yml","Kubernetes Chat With Joe Beda","en-us/blog/kubernetes-chat-with-joe-beda.yml","en-us/blog/kubernetes-chat-with-joe-beda",{"_path":6011,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6012,"content":6017,"config":6022,"_id":6024,"_type":14,"title":6025,"_source":16,"_file":6026,"_stem":6027,"_extension":19},"/en-us/blog/kubernetes-chat-with-kelsey-hightower",{"title":6013,"description":6014,"ogTitle":6013,"ogDescription":6014,"noIndex":6,"ogImage":5996,"ogUrl":6015,"ogSiteName":670,"ogType":671,"canonicalUrls":6015,"schema":6016},"Kubernetes and the future of cloud native: We chat with Kelsey Hightower","Our CEO sits down with Google Staff Developer Advocate Kelsey Hightower to talk fundamentals, the future of cloud native, and Kubernetes.","https://about.gitlab.com/blog/kubernetes-chat-with-kelsey-hightower","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Kubernetes and the future of cloud native: We chat with Kelsey Hightower\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-05-13\",\n      }",{"title":6013,"description":6014,"authors":6018,"heroImage":5996,"date":6019,"body":6020,"category":1318,"tags":6021},[788],"2019-05-13","\n\n[Kelsey Hightower](https://twitter.com/kelseyhightower) is a Staff Developer Advocate at Google, co-chair of KubeCon, the largest Kubernetes conference, and an avid open source technologist. 
Naturally, we couldn’t think of a better first subject for TechExplorers, a new blog series where we talk to the industry’s tech leaders.\n\nGitLab CEO [Sid Sijbrandij](/company/team/#sytses) sat down with Kelsey to talk about a variety of topics like cloud native, Kubernetes, infrastructure challenges, understanding new technology, and much more. One topic that came up again and again was fundamentals. Even with so many new technologies and methodologies out there – Kubernetes, [serverless](/topics/serverless/), cloud native – the basics of computing remain the same. It’s only when we understand the fundamentals and commit to building reliable code that we can make the most of these new platforms.\n\nOne of the biggest challenges Kelsey sees is the “all-or-nothing” approach. “Either I’m all serverless, or I’m all Kubernetes, or I’m all traditional infrastructure. That has never made sense in the history of computing,” he says. Ultimately, you don’t have to choose: Pick the platforms that work best for the job.\n\nGoing forward, Kelsey hopes that development continues to focus on high-level interfaces and hide the infrastructure underneath. Organizations want to have as little interaction with servers as possible. “That is what we’re trying to do. Anything more than that is noisy, and it’s kind of serving our own self-interest … We need those creative people not to be wasting time trying to build up a cloud platform before they can solve real problems.”\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9OHNejqXOoo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nVideo directed and produced by [Aricka Flowers](/company/team/#arickaflowers)\n{: .note}\n\n## Takeaways\n\n### On early Kubernetes:\n\n>\"... 
When it first came out, just based on my previous experience as a system administrator, this is the thing you’re trying to build all those years. So, when I saw it, I immediately knew this thing solves my problems. So, I think I kind of attacked it as a contributor first. And someone who wanted to teach other people what I saw in it. Not sure if it was ever going to blow up or not. But it definitely had the right footprint when it came out.\"\n\n### On teaching others:\n\n>\"I usually try to explain things based on the fundamentals, and then break down the technology until we get to the bottom. So, whenever something new comes out, my guess is it’s not going to change how we do computing. That hasn’t changed in a long time ... Once you learn the three, four, five basic fundamentals, then you just look at the new technology, and you just work your way down.\"\n\n### On invisible infrastructure:\n\n>\"Forever, people have tried to build a thing where most of the organization **doesn’t think about servers**. So whether you’re using Kubernetes, or virtualization for that matter, the whole goal is that if I check in code, there should be very little interaction with infrastructure to get that deployed to customers. To me, serverless is just a reminder to us that we should focus on a high-level interface and hide the various infrastructure underneath.\"\n\n### On adopting cloud native platforms:\n\n>\"If you take your app that you wrote 20 years ago and neglect it all this time, you don’t have any of those kind of controls, and you just move that app into the cloud native type of design patterns, it’s going to be worse than what you had before … People have to understand that there’s tradeoffs. You’re going to have to _write more reliable code_ if you expect to be able to adopt these platforms.\"\n\n## On monoliths:\n\n>\"There’s nothing wrong with monoliths, honestly. People have gotten themselves in a spot where they can’t really update the code. It’s messy. 
The codebase is all over the place. And if you take that same mentality to functions, you’re just going to have a mess of functions that are going to be all over the place and not even know how to call them.\n\n>\"_Discipline is required no matter what the platform is._ People think platform will absolve them from discipline.\"\n",[9,1041,1477],{"slug":6023,"featured":6,"template":686},"kubernetes-chat-with-kelsey-hightower","content:en-us:blog:kubernetes-chat-with-kelsey-hightower.yml","Kubernetes Chat With Kelsey Hightower","en-us/blog/kubernetes-chat-with-kelsey-hightower.yml","en-us/blog/kubernetes-chat-with-kelsey-hightower",{"_path":6029,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6030,"content":6036,"config":6041,"_id":6043,"_type":14,"title":6044,"_source":16,"_file":6045,"_stem":6046,"_extension":19},"/en-us/blog/kubernetes-terminology",{"title":6031,"description":6032,"ogTitle":6031,"ogDescription":6032,"noIndex":6,"ogImage":6033,"ogUrl":6034,"ogSiteName":670,"ogType":671,"canonicalUrls":6034,"schema":6035},"Understand Kubernetes terminology from namespaces to pods","Kubernetes can be a critical piece of successful DevOps but there's a lot to learn. 
We explain the terms and share a hands-on demo.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670635/Blog/Hero%20Images/kubernetesterms.jpg","https://about.gitlab.com/blog/kubernetes-terminology","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Understand Kubernetes terminology from namespaces to pods\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-07-30\",\n      }",{"title":6031,"description":6032,"authors":6037,"heroImage":6033,"date":6038,"body":6039,"category":679,"tags":6040},[851],"2020-07-30","\n\n_If you're brand new to Kubernetes, you'll want to start with our [Kubernetes 101 guide](/blog/kubernetes-101/)._\n\nKubernetes and containers are often seen as two key elements in a [successful DevOps practice](/topics/devops/). But there's no question that Kubernetes can be intimidating to those not familiar with it. In fact, our [2020 Global DevSecOps Survey](/developer-survey/) found just 38% of respondents are actively using Kubernetes today while 50% are not. Anecdotally though, interest in Kubernetes is very high:\n\n_\"We are on the path to get our monolithic server into a sert of microservices and the goal is to use Kubernetes to help on this side.\"_\n\n_\"We're trying to get there.\"_\n\n_\"It's a priority for our platform team.\"_\n\nThis past spring staff distribution engineer [Jason Plum](/company/team/#WarheadsSE) and senior distribution engineer [Gerard Hickey](/company/team/#ghickey) walked attendees at GitLab's company-wide meeting Contribute through something they called _Kubernetes 102_ that looked at the practical building blocks required for a cloud-native application on [Kubernetes](https://kubernetes.io). 
As Jason puts it in the [video](https://www.youtube.com/watch?v=jdKXBJLHP8I&feature=emb_title), \"what we're trying to do here is to not just say, 'Look at all the magic we do' but actually explain the things we're doing right.\" Although this was a \"laptops out\" demo, here's a look at the key concepts and Kubernetes terminology you'll need to understand followed by a link to the entire presentation if you'd like to dive right in.\n\n## Start with containers\n\nA container is not a jail, but a jail is a container, Jason explains. \"A container is a way of packaging an application so that it is portable. It's contained, hence (the term) 'container' and it's immutable. It's the runtime requirements to actually execute and package that up in an immutable form that you can hand to someone.\"\n\nBut containers can have a tendency to get out of hand so you need something to help keep track. That's where Kubernetes comes in, Jason says in the presentation. \"So what is Kubernetes at a high level? I've seen orchestrator, I've seen management system and I've seen coordinator. Kubernetes is all of those things.\"\n\nKubernetes weaves both containers and software-defined networking together, creating \"a platform you can deploy onto with a clear syntax,\" Jason says. \"That syntax is replicable and not vendor bound so that you can deploy it anywhere that supports the official behaviors. Its job is to start containers, keep them running and make sure they're still running. That's what its job is really about.\"\n\n## Unpacking the moving parts\n\nIf you want to get more familiar with Kubernetes, it helps to understand the unique terminology, Jason stresses. Here are key terms that will help to explain the processes involved in running Kubernetes:\n\n**Namespaces**: In Kubernetes, [the namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) is effectively your working area. 
It's like a project in GCP or a similar thing in AWS.\n\n**Pods**: [A pod](https://kubernetes.io/docs/concepts/workloads/pods/) is effectively a unit of work. It is a way to describe a series of containers, the volumes they might share, and interconnections that those containers within the pod may need. You can have a pod that has a single container in it (or more than one container). Pods are flexible, too: Update one and it becomes version two, and version one is taken out, giving you a rolling update. As Jason spells out, \"It gives us a way to say, 'I always want to have three and still be able to migrate an application live from one version to another version without having downtime.'\n\n**Service**: Kubernetes \"has a concept of [a service](https://kubernetes.io/docs/concepts/services-networking/service/),\" Jason says. \"It can be thought of as like a load balancer for pods. It knows which pods are alive, healthy, and ready to respond so that when we try to access whatever pod we want to get to instead of to connect to the deployment and getting the one we get, and then always asking that pod for work.\"\n\n**Ingress**: This works with the service to make sure everything ends up in the right place. [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) can also provide load balancing.\n\n**ConfigMaps**: This is an API object for storing information in key-value pairs. 
\"A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) is very useful for doing things like pre-stashing environment variables or files that can actually be mounted directly into pods without actually having to have an actual file system somewhere,\" Jason says, adding that they're not meant for confidential data.\n\n**Secrets**: [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) are an object and a place to store confidential information as the name implies.\n\nNow that you have the Kubernetes terminology down, watch the entire presentation here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/jdKXBJLHP8I\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n**Read more about Kubernetes**:\n\n* [Keep your Kubernetes runners moving](/blog/best-practices-for-kubernetes-runners/)\n\n* Set up GitLab CI/CD on [Google Kubernetes Engine](/blog/gitlab-ci-on-google-kubernetes-engine/) in 15 minutes!\n\n* Create a [Kubernetes cluster](/blog/gitlab-eks-integration-how-to/) on Amazon EKS\n\nCover image by [Matti Johnson](https://unsplash.com/@matti_johnson) on [Unsplash](https://unsplash.com)\n{: .note}\n\n## Read more on Kubernetes:\n\n- [How to install and use the GitLab Kubernetes Operator](/blog/gko-on-ocp/)\n\n- [Threat modeling the Kubernetes Agent: from MVC to continuous improvement](/blog/threat-modeling-kubernetes-agent/)\n\n- [How to deploy the GitLab Agent for Kubernetes with limited permissions](/blog/setting-up-the-k-agent/)\n\n- [A new era of Kubernetes integrations on GitLab.com](/blog/gitlab-kubernetes-agent-on-gitlab-com/)\n\n- [What we learned after a year of GitLab.com on Kubernetes](/blog/year-of-kubernetes/)\n",[1477,9,1041],{"slug":6042,"featured":6,"template":686},"kubernetes-terminology","content:en-us:blog:kubernetes-terminology.yml","Kubernetes 
Terminology","en-us/blog/kubernetes-terminology.yml","en-us/blog/kubernetes-terminology",{"_path":6048,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6049,"content":6055,"config":6061,"_id":6063,"_type":14,"title":6064,"_source":16,"_file":6065,"_stem":6066,"_extension":19},"/en-us/blog/leah-petersen-user-spotlight",{"title":6050,"description":6051,"ogTitle":6050,"ogDescription":6051,"noIndex":6,"ogImage":6052,"ogUrl":6053,"ogSiteName":670,"ogType":671,"canonicalUrls":6053,"schema":6054},"From motorcycle stunter to DevOps: Finding love for CI/CD","Switching to GitLab helped a newly minted DevOps engineer grasp the concept of CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663760/Blog/Hero%20Images/image-for-leah-post.jpg","https://about.gitlab.com/blog/leah-petersen-user-spotlight","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Motorcycle stunter turned DevOps engineer says GitLab helped her learn to \"love\" CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-06-21\",\n      }",{"title":6056,"description":6051,"authors":6057,"heroImage":6052,"date":6058,"body":6059,"category":679,"tags":6060},"Motorcycle stunter turned DevOps engineer says GitLab helped her learn to \"love\" CI/CD",[3485],"2018-06-21","\nWhen professional motorcycle stuntwoman turned developer Leah Petersen switched from Jenkins to GitLab, she was a bit nervous to say the least. Having only worked in tech for nine months, the [Samsung SDS](https://www.samsungsds.com/us/en/index.html) engineer was not enthused about the prospect of having to learn a new application after feeling like she had “just started to get competent” with Jenkins.\n\nAfter a self-described mini pity party, she dove into GitLab head first, jumping into a few big ticket projects to get a handle on the landscape. 
Within a few short months, Petersen was so impressed by her GitLab CI/CD experience that she felt the need to shout her newfound “love” for continuous integration and continuous delivery from the virtual mountaintop of [her blog](https://leahnp.github.io/2018/moving-from-jenkins-to-gitlab-CI/).\n\nWe recently met up with Petersen to learn more about her transition to the tech world and experience with GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Avx_RftRT_o\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Q & A with Leah Petersen, DevOps Engineer\n\n**Where do you work and what does your team do?**\n\nI work for a team in Samsung SDS called the Cloud Native Computing Team, and I'm [a DevOps engineer](https://about.gitlab.com/topics/devops/what-is-a-devops-engineer/). We deal primarily with containers in Kubernetes and helping companies modernize and move to the cloud. My team is super unique. We were kind of treated like an incubated startup within Samsung, so we're really given a lot of autonomy to make our own decisions.\n\nOur team was put together about five years ago, and Samsung really made a bet on Kubernetes being the future of orchestrating huge workloads in the cloud. Initially, we were focusing mainly on research and development, contributing to the Kubernetes community and learning who was a part of it, what their motives were, and how we could find our place in it. Over the last year, Samsung has really pivoted our role in the company, and we're looking at how we can help Samsung as a global organization move to Kubernetes and containers.\n\n**Where did you work before Samsung?**\n\nI was a motorcycle stunt rider before I became an engineer, and that career kind of organically grew out of my passion for motorcycles. 
I started stunting, loved the community and was able to meet people all over the country and travel. Being one of the few women who did it, I organically started getting calls for jobs and gigs. I thought, “If I can do this in my 20s and make this my full-time career, I'm definitely going to take a shot at it,” so I did.\n\nIt was an amazing opportunity and experience to travel the world and meet people all over this planet who are passionate about this crazy thing that I'm also passionate about. And I got to work with a lot of amazing brands and raise awareness about the sport that I love. So, I don't have any regrets about that and cherish the time that I got to spend on a motorcycle professionally.\n\n**How did you move from being a professional motorcycle stunter to a DevOps engineer?**\n\nI had been looking for a new career path and wasn't really sure what I was going to do. I knew that I wanted to build some tangible skills. I wanted skills that had a clear market value, and tech definitely provides that.\n\nI ended up taking an online coding course in Python, and had this “aha” moment where I realized, not only can I do this, which I didn't think was previously possible, but it's fun; I really like solving these problems. At that point I started taking more online courses and learning as much as I could for free. Then I ended up finding [Ada Developers Academy](https://www.adadevelopersacademy.org/), and that was the perfect segue into the industry.\n\n> I had this “aha” moment where I realized, not only can I do this, which I didn't think was previously possible, but it's fun\n\n**Can you describe how your experience has been as woman in tech?**\n\nYou definitely get a lot of strange reactions being a woman in tech. Walking into a situation, oftentimes people are surprised you're an engineer. 
You'll get reactions like, “Oh, I thought you were a project manager,” or, “I thought you were a recruiter,” or whatever other stereotype that you brought into the room. That can be discouraging and makes you feel unwelcome in that space. But I think we need women in every part of tech: frontend, backend, DevOps, operations, everything. If your interest is in UX, go for that. But don't let all the men who've been in the industry for 25 years on the operations side of things scare you off either. I really think we need diverse minds and approaches to problems in the whole spectrum of it.\n\nSometimes I forget about the gender disparity in tech because my team, specifically, has a couple of really amazing women who I get to work with every day. So, I'm very fortunate. But I recently went to KubeCon in Copenhagen, and it's a amazing conference with so much energy, but it's a real wake up call when you see the gender disparity there. There's 4,000 guys walking around and you feel like you stick out [or] when you're sitting in an auditorium, look around and realize, “Oh, I'm the only lady here.” It's something that you can't look away from.\n\n**Why did you decide to go into DevOps engineering?**\n\nIn my boot camp classes we were focusing on web development and building Ruby on Rails and Node.js apps. We each had an opportunity to do an internship at companies in Seattle that support the Ada program. Samsung was one of them, and they came in to do a presentation about their involvement in open source and Kubernetes. I had no idea what they were talking about, but Kubernetes and the momentum of the open source community was really appealing to me. So I took a chance and picked Samsung, dove right in, and found my way as I went along. I'm really happy that I chose Kubernetes and to specialize in the cloud.\n\n>Kubernetes and the momentum of the open source community was really appealing to me. 
So I took a chance, dove right in, and found my way as I went along\n\n**How did you get started with GitLab CI/CD? And how would you describe your transition to the application?**\n\nI always felt like I was fighting with the CI platform we were on prior to GitLab. It was never really functioning how we wanted it to, and something was always kind of failing. The whole reason you have CI/CD is to get visibility into what's happening with your code, right? You want to run your code through this pipeline and make sure there are no bugs, that you’re packaging it correctly and putting it in the places that you need it to be in production. It's this hugely critical component of going from the developer's computer to the world; that's the pipeline. So you really need the visibility to see what is happening every step of the way.\n\nOn the old system, I felt that I just didn't have that visibility. I was digging for the problems and not able to understand where they were coming from, where they were originating from, why they were happening or how to fix them. I feel like GitLab definitely does a great job of assisting the user in finding the origin of a problem, tracing that step back and making it clear where your issues are and when you're having success.\n\n**How has using GitLab impacted your career and workflow?**\n\nThere's a lot of talk about accessibility and user experience in tech. And we all know what it's like to have a bad user experience with a piece of technology; it's the most frustrating thing in the entire world. As a developer, you deal with lots of different tech every single day. When I started using GitLab about a year and a half into my career, it was certainly the first platform where I was like, ‘I feel so at home here. Everything’s fluid. I can find where everything is. 
I understand what everything is.’ There aren't these big black holes of confusion that have me asking, “Why does this exist and what am I doing here?’”\n\nWith GitLab, everything is just this cheery, happy place. And I really appreciate how it has now set the bar for me when it comes to the way in which a technology should function when I’m working with it.\n\nCover photo by [Rendiansyah Nugroho](https://unsplash.com/photos/JUePy_-uOSI) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[813,1477,1041,682,749,267,9,1829],{"slug":6062,"featured":6,"template":686},"leah-petersen-user-spotlight","content:en-us:blog:leah-petersen-user-spotlight.yml","Leah Petersen User Spotlight","en-us/blog/leah-petersen-user-spotlight.yml","en-us/blog/leah-petersen-user-spotlight",{"_path":6068,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6069,"content":6074,"config":6079,"_id":6081,"_type":14,"title":6082,"_source":16,"_file":6083,"_stem":6084,"_extension":19},"/en-us/blog/learn-python-with-pj-part-1",{"title":6070,"description":6071,"ogTitle":6070,"ogDescription":6071,"noIndex":6,"ogImage":1489,"ogUrl":6072,"ogSiteName":670,"ogType":671,"canonicalUrls":6072,"schema":6073},"Learn Python with Pj! Part 1 - Getting started","Follow along as our education evangelist Pj Metz learns Python, and shares his experiences in the first of a multi-part series.","https://about.gitlab.com/blog/learn-python-with-pj-part-1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learn Python with Pj! Part 1 - Getting started\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"PJ Metz\"}],\n        \"datePublished\": \"2022-02-08\",\n      }",{"title":6070,"description":6071,"authors":6075,"heroImage":1489,"date":6076,"body":6077,"category":679,"tags":6078},[831],"2022-02-08","\n\n_Hello World!_ \n\nMy name is Pj Metz and I’m the education evangelist at GitLab. 
My day job involves working with universities across the globe to help faculty and students learn to use GitLab for educational or research purposes. Currently, my code experience is limited to C# and JavaScript, with some HTML and CSS in there for good measure. However, one of the most popular languages in the education community is Python, so I decided to jump in and teach myself Python to better connect with my community members. \n\nI’ll be learning on [Codecademy](https://www.codecademy.com), an online interactive learning platform that offers a variety of languages and career path curriculums, both free and paid. It’s where I started learning to code back in 2020 so I’m already comfortable with it’s format and curriculum style. \n\nEvery few weeks you’ll see what I’ve learned and how I’ve applied that new knowledge. I’ll discuss the basics of writing in Python and show off some of what I’ve done. I’m still relatively new to writing code in general, so expect to see this through the eyes of a beginner — not just a Python beginner, but coding in general. I might even make a mistake in my descriptions/explanations. Let’s jump in! 🐍\n\n## First lessons\n\nThe first few lessons involved writing a “hello world” and changing the value of a premade variable.\n\n![codecademy screen showing instructions on the left, the IDE in the middle, and the output on the right](https://about.gitlab.com/images/blogimages/helloworld.png)\n\nI moved on to writing my own variables and experimenting with several different types, including ints, strings, and floats. I learned that you can change a variable after defining it, similar to many languages, and that you can even change the type; the most recently defined type will be the one used at run time. Concatenation works similarly to other languages: using a plus sign to combine variables. 
I did some reading ahead and learned about [f-strings](https://www.geeksforgeeks.org/formatted-string-literals-f-strings-python/) as an easy method of concatenating strings. I’m used to doing something similar in JavaScript for my [Twitter bots,](https://gitlab.com/MetzinAround/DivasLive) so this felt important to know. \n\nI also learned how to do some control flow through `if`, `elif`, and `else`. The logic remains the same, but conventions are a bit different. I’m used to writing an if statement like this in JavScript. \n\n```javascript\nif(partyRock === 'in the house tonight') {\n  everybody = 'have a good time'\n  console.log(`Party rock ${partyRock} everybody just ${everybody}`)\n} else {\n  everybody = 'sad party rock noises'\n  console.log(everybody)\n}\n\n```\nIn Python, there are no curly braces. Rather, a colon and indent takes care of that work. \n\n```python\nif partyRock == 'in the house tonight':\n   everybody = 'have a good time'\n   print(f\"Party Rock is {partyRock} everybody just {everybody}\")\nelse:\n  everybody = 'sad party rock noises'\n  print(everybody)\n```\n\n## Initial thoughts\n\nI like the readability of Python. It’s a little less cluttered, but I remember being very excited about curly braces when I first learned them. Using them for functions and methods and the like always made me feel like a “real programmer” when I was first starting. That being said, Python syntax is coming along naturally for me. \n\nSomething that’s different for me is the way Python has you initialize variables. C# is a statically typed language, meaning that part of defining a variable is saying what type of variable it is (int, string, float, etc.). Python does not require you to define the type, it will simply know at run-time. This is similar to JavaScript, but it does still throw me since I started learning with C#. Additionally, in JavaScript you have to use `let`, `var`, or `const`. In Python you just … name it and give it a value. 
Felt strange at first, but has become more natural as I progressed. Not having to define the type always strikes me as “weird,” but that’s personal preference, not anything actually verifiably wrong. \n\nAdditionally, the naming convention of variables is different as well. Python convention dictates underscores as spaces, while C# and JavaScript both prefer camel case, which is where each new word is capitalized. \n\n``` cs\n int minLength = 8\n```\n```javascript\nminLength = 8\n```\n\n``` python\nmin_length = 8\n```\n\nThe [naming conventions of Python](https://www.python.org/dev/peps/pep-0008/#naming-conventions) have certain rules for when to use underscores and how, especially double underscores which behave differently in Python depending on where they appear in the name. I only know what I’ve seen so far in Codecademy, but they’ve named all their variables with underscores instead of spaces. \n\n### Favorite new knowledge\n\nI really like being able to create multiple line strings simply by using three quotes, similar to using three backticks for a code block in markdown. Formatting the output has always been frustrating for me; having to remind myself that `\\n` exists and then looking up how exactly I’m supposed to use it is something I’ve spent an embarrassing amount of time on. And likely will do until the day I hang up my keyboard for good. \n\n![a code block showing a multi line sentence and the terminal output after showing correct format as dictated by the code](https://about.gitlab.com/images/blogimages/pythonmultilinestring.png)\n\nThis is nice in that how it looks in the code is how it looks in the output. I love that! \n\nThis is the first installment in the Learn Python with Pj! series. 
Make sure to read:\n- [Part 2 - Lists and loops](/blog/learn-python-with-pj-part-2/)\n- [Part 3 - Functions and strings](/blog/learn-python-with-pj-part-3/)\n- [Part 4 - Dictionaries and Files](/blog/learn-python-with-pj-part-4-dictionaries-and-files/)\n- [Part 5 - Build a hashtag tracker with the Twitter API](/blog/learn-python-with-pj-part-5-building-something-with-the-twitter-api/)\n\n",[813,9,2535],{"slug":6080,"featured":6,"template":686},"learn-python-with-pj-part-1","content:en-us:blog:learn-python-with-pj-part-1.yml","Learn Python With Pj Part 1","en-us/blog/learn-python-with-pj-part-1.yml","en-us/blog/learn-python-with-pj-part-1",{"_path":6086,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6087,"content":6092,"config":6096,"_id":6098,"_type":14,"title":6099,"_source":16,"_file":6100,"_stem":6101,"_extension":19},"/en-us/blog/learn-python-with-pj-part-2",{"title":6088,"description":6089,"ogTitle":6088,"ogDescription":6089,"noIndex":6,"ogImage":1489,"ogUrl":6090,"ogSiteName":670,"ogType":671,"canonicalUrls":6090,"schema":6091},"Learn Python with Pj! Part 2 - Lists and loops","Follow along as our education evangelist Pj Metz learns about lists and loops in the second of this multipart series.","https://about.gitlab.com/blog/learn-python-with-pj-part-2","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learn Python with Pj! Part 2 - Lists and loops\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"PJ Metz\"}],\n        \"datePublished\": \"2022-03-01\",\n      }",{"title":6088,"description":6089,"authors":6093,"heroImage":1489,"date":5212,"body":6094,"category":791,"tags":6095},[831],"\nWe’re back with another article about my journey to learn Python. Check out the [first article](/blog/learn-python-with-pj-part-1/) if you want to see what I’ve already learned. Today we’re talking about lists and loops, two important parts of all programming languages. 
Let’s check them out. \n\n## Lists\n\nLists are a way to store information that can be accessed later. They are similar to arrays in other languages. A list is a named collection of other elements inside brackets that can be accessed by an index number. \n\n``` python\n#I will be using this list for all our examples, and, yes, these are some of my favorite musical acts pulled directly from my Spotify 2021 wrapped. \nfavorite_music = ['The Midnight', 'Night Tempo', 'St. Lucia'] \n```\n\nIn this list, each element of the list can be accessed by an index number. Like many other languages, python is zero-indexed, meaning the first element is at index 0. So favorite_music[0] is “The Midnight”, favorite_music[1] is “Night Tempo”, and so on. \n\nSomething interesting about lists in Python is that a negative 1 index number will give you the last element in the list. Negative 2 will give you the second to last, and so on. As far as I can tell, this isn’t possible in other languages: Negative 1 indices will return errors or `undefined` in arrays or lists in other languages. I imagine a scenario where we’ve just added something to a list and need to access it immediately. We could use the negative index number to access the most recently added element. \n\nPython comes with several built-in methods to be used with lists. Some of them have the list passed in as an argument, some are added to the list with a `.` so it can be used. These methods will change the list or return some kind of information about the list. Below are a few I found useful, but a more complete explanation of available methods is [available here](https://docs.python.org/3/tutorial/datastructures.html). \n\n### .pop()\n\nPop allows you to remove a specific element in a list as well as return it at the same time, meaning this can be set to a variable. To specify the element, use the desired index number inside the parentheses to remove it. 
\n\n```python\nbest_synthwave = favorite_music.pop(0)\n\n#returns ‘['Night Tempo', 'St. Lucia']’\nprint(favorite_music)\n\n#returns 'The Midnight'\nprint(best_synthwave)\n```\n### .append() and .insert()\n\nAppend allows you to add an element to a list. Put the element in the parenthesis. The element is added to the end of the list. Insert allows you to say exactly where you would like the element inserted. The first argument is the index you would like to replace, and the second argument is the element to insert. \n\n```python\nfavorite_music.append('Turnstile') \n\n#This will print ['The Midnight', 'Night Tempo', 'St. Lucia', 'Turnstile']\nprint(favorite_music)\n\nfavorite_music.insert(1, 'Kendrick Lamar')\n#This will print 'The Midnight', 'Kendrick Lamar', 'Night Tempo', 'St. Lucia', 'Turnstile'] \n#Turnstile is still there since we appended it before. \nprint(favorite_music)\n```\n\n### len()\n\nLen gets the length of the object passed into it. This is important since you can know exactly how many elements are in a list, which is useful for control flow as well as loops. \n\n```python\nlength_of_music = len(favorite_music)\n\n#working with the original list will print “3”\nprint(length_of_music)\n```\nNotice that it prints how many elements are in the list, not how many indices. I have to work to make sure to keep those two ideas separate. So there are three elements in the list, but the indices are [0], [1], and [2]. \n\n\n## Loops\n\nLoops work very much the same way they do in other languages, but like I’ve seen with the rest of Python, the syntax is more readable and the code just looks a bit cleaner. The two main ways to use loops with Python are `for` and `while`. \n\n### for\n\nFor is used when you want to iterate through each element in an object. The syntax you use here creates a kind of one-time use variable that is then used in the code block in a variety of ways. Let’s say you want to print each band from the favorite_music list from before. 
\n\n```python\nfor band in favorite_music:\n  print(band)\n```\n\nThis would print each band on its own line. If you call print() on favorite_music, it would print the array inside of brackets. You can perform logic inside of for loops to only return certain items. Say you want to only print bands that have “night” in the name:\n\n```python\nfor band in favorite_music:\n    lower_case_band = band.lower()\n    if lower_case_band.__contains__('night'):\n      print(band)\n```\n\nNote: I put all the strings into lower case so we could match cases. Also, I found the contains method on the internet and the example had two underscores on either side. It made my code work whereas without the underscores it did not work. Like I said in the first article, I’m new here and don’t know why it did that.\n\n**EDIT March 7, 2022:** According to commenter \"Glen666\" in the comments, the easier way to check if something is contained in another object is to use the `in` operator. It would look like this: \n\n```python\nfor band in favorite_music:\n  lower_case_band = band.lower()\n  if \"night\" in lower_case_band:\n    print(band)\n```\nThanks for catching this. I hadn't learned `in` yet so this makes it a bit easier! \n\n### while\n\nWhile creates a loop that goes as long as certain criteria are being satisfied, usually a logic expression. If you want some code to run six times, you could use a while loop. \n\n```python\ni = 0\n#This prints the string below 6 times. \nwhile i \u003C 7:\n  print('The Midnight is my favorite band of all time.')\n  i += 1\n```\n\nThis is useful if you want code to run the whole time some circumstance is true, whether it’s a date, another process is running, or anything of the sort. \n\n**EDIT March 7, 2022:** Thanks to user \"magicolf\" in the comments! They let me know that there's an error here where it prints 7 times instead of 6. Because i is declared as `0` first, the loop will actually print seven times. 
It's easy to make mistakes like this all the time, so I appreciate you letting me know, magicolf! \n\nLoops are some of my favorite things to write so far. It’s like a little puzzle to figure out when you need to iterate through a list or string to make something happen at a specific time. The hardest part about loops is getting used to the logic of it. Python made this easier for me in that loops feel very natural to read. On top of that, I’m getting used to the indentation that I felt was so strange last time. I’ve spent about 30 or so hours working on it so far, and It’s starting to feel very natural. Hopefully, I can keep this up as we move on to the [next learning modules](https://about.gitlab.com/blog/learn-python-with-pj-part-3/)! \n\n",[813,9,2535],{"slug":6097,"featured":6,"template":686},"learn-python-with-pj-part-2","content:en-us:blog:learn-python-with-pj-part-2.yml","Learn Python With Pj Part 2","en-us/blog/learn-python-with-pj-part-2.yml","en-us/blog/learn-python-with-pj-part-2",{"_path":6103,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6104,"content":6110,"config":6116,"_id":6118,"_type":14,"title":6119,"_source":16,"_file":6120,"_stem":6121,"_extension":19},"/en-us/blog/lee-tickett-my-gitlab-journey",{"title":6105,"description":6106,"ogTitle":6105,"ogDescription":6106,"noIndex":6,"ogImage":6107,"ogUrl":6108,"ogSiteName":670,"ogType":671,"canonicalUrls":6108,"schema":6109},"From user, to advocate, to contributor: my GitLab journey","Three years (as a user and as a contributor) with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681735/Blog/Hero%20Images/cover_photo.jpg","https://about.gitlab.com/blog/lee-tickett-my-gitlab-journey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From user, to advocate, to contributor: my GitLab journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Tickett\"}],\n        
\"datePublished\": \"2020-11-13\",\n      }",{"title":6105,"description":6106,"authors":6111,"heroImage":6107,"date":6113,"body":6114,"category":1359,"tags":6115},[6112],"Lee Tickett","2020-11-13","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nI have had a passion for technology since before I can remember. Thirteen years ago I took the plunge, quit my day job, and started an IT development and support company called [Tickett Enterprises Limited](https://www.tickett.net). For the last three years, GitLab has been a part of my journey.\n\n## 3 Years Ago \nWe were (and still are) using a helpdesk system we built ourselves. It does exactly what we need it to do - and any time it doesn’t, we change it. The most important feature of the system is reporting. Specifically, facilitating our monthly billing process; with a click of a button, we generate timesheets and invoices for all of our clients.\n\nThough I was aware of Git (and GitHub), I had not heard of GitLab. We were using SVN in its most basic form (single repository for all projects and no branching), with an integration so all commits would create notes in our helpdesk.\n\n## 2.5 Years Ago\nWe decided that SVN was no longer fit for purpose. Our top issues were: \n* never knowing whether the code in our repository matched what was deployed\n* not being able to work collaboratively on projects\n* feature/knowledge limitations\n* Git was the industry standard \n\nWhile most of these issues were due to the way we were using SVN, we were keen to adopt a more popular system. I don’t remember how I found GitLab, but I did, and spun up a local on-prem instance of Community Edition (CE) using separate projects/repositories and basic branching. 
If you are considering running a local instance, I recommend the [Bitnami appliance/.ova](https://bitnami.com/stack/gitlab).\n\nIt took some time to get used to local vs remote and to remember to push as well as commit, but we picked it up pretty quickly.\n\n## 2 Years Ago\nWe wanted to use GitLab to help us improve our processes so we:\n* built a little UI for project creation (using the GitLab API). This ensures new projects fit our naming standards, contain our standard template files, have our standard master/test/dev branches, contain the relevant members, and use our webhooks\n* recreated the helpdesk integration we had with SVN (every commit and comment is replicated as a note on our helpdesk)\n* unaware of GitLab EE, we created a custom merge request approval process using webhooks. Our master branch is always protected - a merge request requires 2 approvals from 2 distinct reviewers (one for code and one for functionality)\n\n## 1.5 Years Ago\nA bit late to the party, but finally we set up the GitLab runner to automate our build, spin up our database, execute our unit tests and report test details and code coverage. GitLab CI for .NET was not as well documented as other use cases leading to a lot of trial and error when setting up the runner.\n\nWe are using the Windows runner configured to use a standard shell (which I think is no longer supported). We will either be moving to powershell on windows or possibly using docker images. 
Here’s a sample .gitlab-ci.yml\n\n```yml\nstages:\n  - build\n  - test\n\nvariables:\n  CI_DEBUG_TRACE: \"false\"\n  ASSEMBLY_VERSION: \"1.0.4\"\n  \nbuild:\n stage: build\n script:\n  - 'C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319\\nuget restore'\n  - '\"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\BuildTools\\MSBuild\\15.0\\bin\\msbuild\" /t:Restore,Clean,ReBuild /t:Database:Publish /p:Configuration=Debug;Platform=\"Any CPU\" /p:SqlPublishProfilePath=Database.publish.xml'\n  - 'ping 192.168.99.99 -n 1 -w 10000 2>nul || type nul>nul'\n artifacts:\n  paths:\n   - Tests/bin/\n\ntest:\n stage: test\n script:\n  - 'c:\\GitLab-Runner\\opencover\\OpenCover.Console.exe -returntargetcode:1000 -filter:\"+[*]* -[nunit*]* -[*Tests*]*\" -register -target:\"C:\\Program Files (x86)\\NUnit.org\\nunit-console\\nunit3-console.exe\" -targetargs:\"Tests\\Tests.csproj --result=testresult.xml;transform=C:\\gitlab-runner\\nunit3-junit.xslt\"'\n coverage: '/^Visited Branches .*(\\(\\d+\\.?\\d*\\))/'\n dependencies:\n  - build\n artifacts:\n  reports:\n   junit: testresult.xml\n```\n\nWe were building another customization to allow us to search for code across all repositories. Unfortunately, we hit a limitation because the API did not allow searching anything but the default branch.\n\nAt this point, while Googling for help getting CI up and running, I learned that GitLab is open-source. So I thought maybe I could extend the API to support searching any branch. This lead to [my first contribution](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/28069).\n\n## 1 Year Ago\nAt this point, I was completely new to all of the technologies, techniques, and best practices used by GitLab but found myself participating in my first [GitLab hackathon](https://about.gitlab.com/community/hackathon/). Somehow, I managed to take joint first prize!\n\nMy first few contributions were achieved by modifying my production GitLab installation (not ideal). 
So it was time to get the [GitLab Development Kit (GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit) up and running. This was certainly not without its challenges (many of which I suspect stem from me being in the minority of GitLab contributors running Windows).\n\nI have since contributed to the [GDK project](https://gitlab.com/gitlab-org/gitlab-development-kit) and joined the GDK office hour calls to help shape the way forward and resolve some of the problems and frustrations.\n\nAt this point, I was leearning a lot. Not just about the tools and languages but about the best practices and work ethos within the GitLab team. Better yet, I was able to start taking some of these learnings back to the office.\n\n## 0.5 Years Ago\nI attended GitLab Commit - London 2019. This really helped to confirm my suspicions; we are only scraping the surface of GitLab's capabilities.\n\nOn a few occasions, I wondered whether GitLab may not be a good fit for my company as I watched huge companies like Porsche and Goldman Sachs present. A [presentation](https://www.youtube.com/watch?v=t0Eh1sq9r5s) by Huss El-Sheikh from startup 9fin helped ease my concerns.\n\nAround this time, I moved from Windows to Ubuntu to make it easier to work with GDK.\n\nI continued to learn a lot from my contributions, feedback, and interactions with the GitLab team, again applying what I could back in the office. Much around the languages/technologies I hadn’t previously worked with (namely ruby, postgres and vue), but also other takeaways such as:\n* when carrying out code reviews ask questions rather than give instructions (“what do you think about x?” is more productive than “change this to y”)\n* GitLab CI is capable of automating a lot of what we currently do by hand (e.g. code review for best practices)\n* always try to add tests when making code changes\n\nI am a firm believer of documenting processes, decisions, and rationale. 
There’s nothing worse than someone saying “we do it this way” without being able to back that up with reasoning. With that in mind, we implemented Merge Request Templates to ensure our team was consistent in our approach to coding, testing, and releasing.\n\nBy now our development team had plenty of experience with GitLab and we were starting to move our support team over. To help our team leads monitor merge requests, we adopted 2 simple departmental labels (`Support`/`Development`) and used our webhook engine to ensure every MR is automatically labelled.\n\n## Today / What’s Next\nIn preparation for a transition to .NET core, deprecation of the Windows shell runner and a desire to start testing our frontend (web), I started putting a CI script together using docker and the mcr.microsoft.com/dotnet/core/sdk:latest image. The .gitlab-ci.yml looks like;\n\n```yml\nstages:  \n  - build\n  - test\n\nvariables:\n  CI_DEBUG_TRACE: \"false\"\n  ASSEMBLY_VERSION: \"1.0.1\"\n\nbuild:\n stage: build\n tags:\n  - docker\n script:\n  - 'dotnet build'\n\ntest:\n stage: test\n tags:\n  - docker\n script:\n  - 'nohup dotnet run --project Web &'\n  - 'apt-get update'\n  - 'apt-get install -y unzip'\n  - 'wget https://chromedriver.storage.googleapis.com/83.0.4103.14/chromedriver_linux64.zip'\n  - 'unzip chromedriver_linux64.zip -d ~/'\n  - 'rm chromedriver_linux64.zip'\n  - 'mv -f ~/chromedriver /usr/local/bin/chromedriver'\n  - 'chown root:root /usr/local/bin/chromedriver'\n  - 'chmod 0755 /usr/local/bin/chromedriver'\n  - 'wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -'\n  - 'sh -c ''echo \"deb https://dl.google.com/linux/chrome/deb/ stable main\" >> /etc/apt/sources.list.d/google.list'''\n  - 'apt-get update'\n  - 'apt-get install -y google-chrome-stable'\n  - 'dotnet test -l:trx Tests/Tests.csproj /p:CollectCoverage=true'\n coverage: '/Total\\s*\\|.*\\|\\s(\\d+\\.?\\d*)%\\s*\\|.*\\|/'\n```\n\nAnd the tests look something like;\n\n```c#\n    
public class UiTests : IDisposable\n    {\n        private readonly Process _webServerProcess;\n        private readonly IWebDriver _driver;\n\n        [Fact]\n        public void ClickNavPrivacyPolicy()\n        {\n            _driver.Navigate()\n                .GoToUrl(\"http://localhost:5000/\");\n\n            var link = _driver.FindElement(By.LinkText(\"Privacy\"));\n            link.Click();\n\n            Assert.Equal(\"http://localhost:5000/Home/Privacy\", _driver.Url);\n        }\n\n        public UiTests()\n        {\n            ChromeOptions chromeOptions = new ChromeOptions();\n            chromeOptions.AddArguments(\"headless\", \"no-sandbox\");\n            _driver = new ChromeDriver(chromeOptions);\n\n            if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux)) return;\n\n            _webServerProcess = new Process\n            {\n                StartInfo = {\n                    WorkingDirectory = Path.Combine(System.AppDomain.CurrentDomain.BaseDirectory, \"..\", \"..\", \"..\", \"..\", \"Web\"),\n                    FileName = $\"dotnet.exe\",\n                    Arguments = \" run\",\n                    UseShellExecute = true,\n                }\n            };\n            _webServerProcess.Start();\n        }\n\n        private void KillWebServer()\n        {\n            if (_webServerProcess != null && !_webServerProcess.HasExited)\n            {\n                _webServerProcess.Kill();\n            }\n        }\n\n        public void Dispose()\n        {\n            _driver.Dispose();\n            KillWebServer();\n        }\n    }\n```\n\nYou can see some conditional code in there which allows Selenium tests to work both locally on our development machines and remotely on our GitLab runner. If you have a better way of achieving this, please leave a comment. 
I would love to chat and learn!\n\nI also want to start introducing some linting like we see in the GitLab project to enforce rules around code formatting (spaces, carriage returns, indentation, etc.). I have started to look at JetBrains Resharper (R#) command-line but haven’t had enough time to implement it yet. Ideally. I would like to start with just a rule or two and then slowly introduce more, but it looks quite tricky to take this approach. Please let me know if you’ve been able to achieve this!\n\nI would also like to lose our helpdesk and start using GitLab issues, service desk, timelogs, etc. I am working on identifying the gaps and working with the product managers to understand whether it is realistic to fill those gaps within the GitLab product. Alternatively, I will be looking to build some additional “bolt-ons” using webhooks and the API.\n\nWhile investigating gaps, I stumbled upon the [GitLab-Triage project](https://gitlab.com/gitlab-org/gitlab-triage) and I expect we'll use this to automate various workflows. I managed to help close a few issues and even create a few additional features which would make it work for us by [contributing to the GitLab-Triage project](https://gitlab.com/gitlab-org/gitlab-triage/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&author_username=leetickett).\n\nWe also added more labels (`needs code review` & `needs functional review`) for our merge request approval process now. We can see where we are and what needs to be done at a glance. We previously relied on an MR checklist that we are deprecating.\n\n![Merge request checklist](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/mr_checklist.png)\n\n![Merge requests with labels](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/merge_requests_with_labels.png)\n\n## Contributing to GitLab \n\nI am very proud to have joined the GitLab Core Team. 
Thanks to everyone who has held my hand and patiently assisted me with contributions. \n\nWith the release of Microsoft Windows Subsystem for Linux v2, I have gone back to running Windows on my laptop with GDK running in Ubuntu on WSL2. This is working brilliantly for me at the moment (the way Visual Studio Code handles things especially is really cool).\n\nI now have 95 [merged merge requests!](https://gitlab.com/dashboard/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&author_username=leetickett) and have been helping several others get started contributing (getting GDK up and running etc). Once this crazy pandemic is over and we can start to socialise again, I would like to try and start some sort of local meetup/group.\n\nI would like to help make it easier to connect GitLab users. I have visions of a mechanism to search for others based:\n* the size of their user base \n* the languages they are using\n* the feature they are using\n\nAt present, we have several tools (Gitter, Issues, Forum etc) but there is a strong reliance on being engaged and stumbling on questions/support requests. I suspect many of us would be happy to have other users reach out directly.\n\nIf you need any more information around:\n* getting your development environment/tools setup on Windows 10\n* getting CI working with .NET and SQL Server projects\n* building customisations using GitLab webhooks and API\n\n...or would like to see a demo of anything discussed above, I would be happy to oblige!\n\nI would love to connect with others who are either looking to, or already using GitLab for:\n* .NET projects\n* customer helpdesk \n* customer billing (using timelogs)\n\nThanks for reading! 
Here's a picture of me and the family repping with our GitLab merch!\n\n![The tickett family repping GitLab](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/landing_page.png)\n",[109,267,1829,683,682,9,749],{"slug":6117,"featured":6,"template":686},"lee-tickett-my-gitlab-journey","content:en-us:blog:lee-tickett-my-gitlab-journey.yml","Lee Tickett My Gitlab Journey","en-us/blog/lee-tickett-my-gitlab-journey.yml","en-us/blog/lee-tickett-my-gitlab-journey",{"_path":6123,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6124,"content":6129,"config":6134,"_id":6136,"_type":14,"title":6137,"_source":16,"_file":6138,"_stem":6139,"_extension":19},"/en-us/blog/less-headaches",{"title":6125,"description":6126,"ogTitle":6125,"ogDescription":6126,"noIndex":6,"ogImage":5670,"ogUrl":6127,"ogSiteName":670,"ogType":671,"canonicalUrls":6127,"schema":6128},"Two DevOps platform superpowers: Visibility and actionability","Migrating to a DevOps platform helps organizations better understand and improve their development lifecycle.","https://about.gitlab.com/blog/less-headaches","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Two DevOps platform superpowers: Visibility and actionability\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-09-26\",\n      }",{"title":6125,"description":6126,"authors":6130,"heroImage":5670,"date":6131,"body":6132,"category":769,"tags":6133},[810],"2022-09-26","\nA [DevOps platform](/blog/the-journey-to-a-devops-platform/) deployed as a single application takes DevOps gains to the next level, enabling teams to deliver more value to their organization with fewer headaches. A platform, which includes the ability to plan, develop, test, secure, and operate software, empowers teams to deliver software faster, more efficiently, and more securely. 
And that [makes the business more competitive and more agile](/blog/the-devops-platform-series-building-a-business-case/).\n\n## DevOps visability and actionability\n\nA complete DevOps platform gives organizations everything they need to turn ideas into valuable and secure software without the time-consuming and costly headaches that multiple tools and multiple UXes bring. A single, end-to-end platform also gives teams one data store sitting underneath everything they do, and, regardless of the interface they are using, allows them to easily surface insights about developer productivity, workflow efficiency, and DevOps practice adoption.\n\nThere are many benefits to a DevOps platform, including visibility and actionability.\n\n### Gain visibility and context\n\nA DevOps platform enables DevOps teams to see and understand what’s happening in their organization, and provide context for those events. With insights that go much deeper than what a simple report or dashboard can offer, DevOps teams can better understand the status of projects, as well as their impact.\n\n### Take action more easily\n\nActionability means users can take that contextual information and efficiently and quickly do something with it at the point of understanding. Users can move a project ahead more quickly because they don’t have to wait to have a synchronous conversation or meeting to review the new information.\n\nHere are a few ways that an end-to-end platform provides visibility and actionability.\n\n### Track projects with epics and issues\n\nIn a DevOps platform, users are better able to communicate, plan work, and collaborate by using epics and issues. [Epics](https://docs.gitlab.com/ee/user/group/epics/) are an overview of a project, idea, or workflow. 
Issues are used to organize and list out what needs to be done to complete the larger goal, to track tasks and work status, or work on code implementations.\n\nFor instance, if managers want an overview of how multiple projects, programs, or products are progressing, they can get that kind of visibility by checking an epic, which will give them a high-level rollup view of what is being worked on, what has been completed, and what is on schedule or delayed. Users can call up an epic to quickly see what’s been accomplished and what is still under way, and then they can dig deeper into sub-epics and related issues for more information.\n\n[Issues](https://docs.gitlab.com/ee/user/project/issues/) offer details about implementation of specific goals, trace collaboration on that topic, and show which parts of the initiative team members are taking on. Users also can see whether due dates have been met or not. Issues can be used to reassign pieces of work, give updates, make comments or suggestions, and see how the nuts and bolts are being created and moved around.\n\n### Labels help track and search projects\n\nLabels are classification tags, which are often assigned colors and descriptive titles like \"bug\", \"feature request\", or \"docs\" to make them easy to understand. They are used in epics, issues, and merge requests to help users organize their work and ideas. They give users at-a-glance insight about what teams are working on a project, the focus of the work, and where it stands in the development lifecycle. Labels can be added and removed as work progresses to enable better tracking and searching.\n\n### Dashboards help track metrics\n\nDashboards are reporting tools that pull together metrics from multiple tools to create an at-a-glance view of projects, [security issues](/blog/secure-stage-for-appsec/), the health of different environments, or requests coming in for specific departments or teams, for instance. 
DevOps platform users can set up live dashboards to see trends in real time, map processes, and track response times, [errors](/blog/iteration-on-error-tracking/), and deployment speed. Dashboards also can be used to see alert statuses and the effect on specific applications and the business overall.\n\n### Value stream analytics\n\nFor visibility without any customization required, there are [value stream analytics](/blog/gitlab-value-stream-analytics/). This interface automatically pulls in data to show users how long it takes the team to complete each stage in their workflow – across planning, development, deployment, and monitoring. This gives developers or product owners – or anyone who wants information on workflow efficiency –  [a look at high-level metrics](/solutions/value-stream-management/), like deployment frequency. This is actionable information so it also shows what part of the project is taking the most time or what is holding up progress. Based on this information, the user can suggest changes, like moving milestones or assigning the work to someone new, and enact those changes with just one click.\n\nWith a DevOps platform, teams have end-to-end visibility that also is actionable. By enabling users to find the information they need with the context they need and giving them the ability to make immediate changes, data becomes actionable. Using a single platform, teams can move projects along more quickly, iterate faster, and create more value and company agility.\n\nCheck out our [Migrating to a DevOps platform eBook](https://page.gitlab.com/migrate-to-devops-guide.html?_gl=1*6p1rz*_ga*MTI3MzMwNjYwMi4xNjYyOTg0OTAw*_ga_ENFH3X7M5Y*MTY2Mzk0NDY1Mi4zOS4xLjE2NjM5NDQ2NjEuMC4wLjA.) 
for even more useful information about how to complete a successful DevOps platform migration\n\n",[9,2535,1040],{"slug":6135,"featured":6,"template":686},"less-headaches","content:en-us:blog:less-headaches.yml","Less Headaches","en-us/blog/less-headaches.yml","en-us/blog/less-headaches",{"_path":6141,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6142,"content":6148,"config":6154,"_id":6156,"_type":14,"title":6157,"_source":16,"_file":6158,"_stem":6159,"_extension":19},"/en-us/blog/lm-sre-shadow",{"title":6143,"description":6144,"ogTitle":6143,"ogDescription":6144,"noIndex":6,"ogImage":6145,"ogUrl":6146,"ogSiteName":670,"ogType":671,"canonicalUrls":6146,"schema":6147},"Shadowing a Site Reliability Engineer","On-call through the eyes of a software engineer. Read Laura's week shadowing a Site Reliability Engineer","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679120/Blog/Hero%20Images/sre-shadow-week.jpg","https://about.gitlab.com/blog/lm-sre-shadow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Shadowing a Site Reliability Engineer\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Laura Montemayor\"}],\n        \"datePublished\": \"2020-04-13\",\n      }",{"title":6143,"description":6144,"authors":6149,"heroImage":6145,"date":6151,"body":6152,"category":1359,"tags":6153},[6150],"Laura Montemayor","2020-04-13","\n{::options parse_block_html=\"true\" /}\n\n## A day in the life of a SRE at GitLab with Laura Montemayor\n\nHi! I’m [Laura Montemayor](https://gitlab.com/laura.Mon), a Frontend Engineer in GitLab’s [Monitor:Health Group](https://about.gitlab.com/handbook/engineering/development/ops/monitor/respond/). Monitor is one the [DevOps lifecycle stages](https://about.gitlab.com/stages-devops-lifecycle/) that comes after you’ve configured your production infrastructure and have deployed your application to it. 
We work on features that help you monitor metrics and Kubernetes, track errors, manage incidents, and more.\n\n_If you’re curious about our team, you can check out our [direction page](/direction/monitor/) to see what’s on the roadmap._\n\nA few weeks ago, I had the opportunity to  shadow our SREs, which was pretty exciting\nfor me, as it is monitoring in action.  An SRE is a **Site Reliability Engineer**,\nwho is responsible for keeping all user-facing services and other GitLab production\nsystems running smoothly.  \n\n_You can learn all about them in [our\nhandbook](https://handbook.gitlab.com/job-families/engineering/infrastructure/site-reliability-engineer/)._\n\n**Here are some of my general takeaways from that week:**\n\n#### Less is more when it comes to tooling\nI’ve had non-remote friends ask me what communications tools we use at GitLab, and\nthey are sometimes surprised that it’s not a long list. In a way, I had the same\nexpectation for our SREs and their tooling; I had visions of them all buried underneath monitors, with screens showing moving graphs, stats, logs - the works!. But SREs use\nthe same tools everyone at GitLab uses for communication - GitLab issues, Slack and\nZoom. The workflow is pretty straightforward; the on-call SRE gets pinged on\nPagerDuty, which also sends an alert to a Slack channel, and everything is discussed\neither there or in an issue. True to our [transparency](https://handbook.gitlab.com/handbook/values/#transparency) value,\neveryone at GitLab has access to the channels and the issues, and most of these issues are public.  
\n\n_If you are interested in learning more about SREs and the tooling they use (and\nmore!), check out my [teammate’s blog\npost.](/blog/sre-shadow/)_\n\n![sre alert flow](https://about.gitlab.com/images/blogimages/sre-alert-flow.png)  \n_MT = Monitoring Tool_\n\n#### Managing the level of noise is a huge challenge\nDuring the time I shadowed, I probably saw an alert every 5 or so minutes.\nAnd this was during their so called “quiet time”! At first, it seems overwhelming to be alerted so often,\nand everything looked serious and actionable to me. _Queue latency outside of SLO?\nFilesystem will be full SOON?_ I panicked. However, a lot of these alerts are warnings\nand don’t all turn into incidents; but even if they are warnings, sometimes they\nmerit some investigation and discussion, since almost no alert can be truly ignored.\n\n##### Examples of alerts that can usually be ignored:\n1. **CPU/Memory usage that just stepped over 90%** - This can be ignored (but an eye\nshould be kept on it) because the prediction algorithms for usage often raise an\nalert prematurely. The usage normally falls to safe levels or won’t cause an issue\nfor a few more SRE shifts.\n2. **Abuse detection** - This can give a lot of false positives which are not\nnecessarily an incident, like abuse being detected when a project’s pipeline is used\nexcessively.\n\n#### Incidents don’t happen as often as I thought\nSo even though there are a lot of alerts, I was surprised to find the majority of them don’t actually turn into incidents. There was a part of me who wanted to experience an incident, since I\nthought going into a situation room and resolving it would be really exciting. But\nthere were only two incidents, and one of them was resolved straight away and the\nother one was handed over for further investigation. 
None of them were disruptive to\ncustomers though, so all in all, I figure GitLab must be pretty stable 💪\n\n#### Monitoring is hard (no surprise there!)\nExperience and domain knowledge certainly help when it comes to sifting through the\nnoise, but it doesn’t make it easy.  A lot of alerts happen because they are often\nbased on inflexible Boolean logic or some arbitrary threshold.\nFor example: we got an alert for a spike in the redis-cache latency Apdex.  We\nchecked the Grafana dashboard and realized it was a one off spike in latency, so it\nwas fine for the time being. But it was one more thing to keep an eye out for, and\nthe intermittent spiking kept alerting us, which added more cognitive noise. I can’t even begin to imagine the horror of trying to effectively monitor with a bunch of different tools. This  is why I love\nthe idea of a single app for all your needs 😉\n\n#### Communication is key\nOne thing that helps immensely with monitoring and incident management is\ncommunication. It is one of our [core values](https://handbook.gitlab.com/handbook/values/#collaboration) at GitLab after all, seeing as we are an\nall-remote, async team. If the SRE who is on-call cannot resolve the issue straight\naway, they can just ping other SREs on Slack and someone will be available to help.\nIt’s the advantage of being an async team - there’s always someone available in\nalmost every single timezone! It also means that even if you’re not on the SRE team\nand you’re working on something that will start firing off alerts, you should\ncommunicate with the SREs to let them know. For example: some changes were made in\nthe Pages API, which were rolled out to 20% of the domains. This caused an increase\nin alerts, but since it was properly communicated, the SRE on call could be assured\nthat these alerts were fine to ignore. There was no way of turning these alerts off\nunfortunately, since the rigidity of the tools doesn’t allow it.  
\n\n[Here’s the issue](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/1734), if you’re curious.\n\n#### We love issues 🧡\nAt GitLab, we love writing everything down in issues, which helps a lot with incident\nmanagement. Every time an SRE changes shifts, they write a handover issue with a\nsummary of what alerts/incidents are ongoing or resolved, etc. Check out [this\nexample](https://gitlab.com/gitlab-com/gl-infra/on-call-handovers/-/issues/413).\nEvery time an alert actually becomes an incident, the first step is always to\nresolve it, since it affects our customers. After that, an issue is created to\ncollaboratively do a root cause analysis and hopefully prevent it from happening\nagain. Keeping record of all of the incidents is really helpful for context and\nresolution of future incidents.\n\n#### There must be collaboration\nOur SREs who are on-call do not work in isolation, and even though they are the first\npoint of contact for alerts, it’s not expected that they solve everything themselves.\nSometimes things can’t be solved in the moment, like the example below.  \n\n**Incident:** AlertManager is failing to send notifications  \n**Why weren’t we able to resolve it?** We don’t have visibility when Alertmanager doesn’t\nwork, as we don’t have logs or graphs to for it.  \n**So what now?** We first checked to see if there was a similar or a linked issue, and\nthere was. After that, we made an issue for it, linked the other issue, assigned a\ndomain expert, and went from there.  \n\n[Here’s the issue.](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/1707)\n\n#### Monitoring is a growing field\nGiven the imperfect nature of monitoring, it’s only natural that the field is\nconstantly evolving and growing. At GitLab, we recently created a [Scalability team](/handbook/engineering/infrastructure/team/scalability/),\nwhich will help curate our monitoring and alerting. 
They will frequently update the\ncriteria to generate alerts, which will make the whole process more manageable. My\nteam is responsible for building the features to help monitor and manage incidents,\nand we’re currently working on features which will help with our SREs workflow. If\nyou’re interested, check out our [Ops Section Product Vision](/direction/ops/) for more details. You can also check out the [reliability guide](/handbook/engineering/infrastructure/team/reliability/) for\na more in-depth and robust look at what our SREs do.  \n\n[Join us](/jobs/) at GitLab! Or consider [trying us\nout](/free-trial/) for free.\n",[9,1339],{"slug":6155,"featured":6,"template":686},"lm-sre-shadow","content:en-us:blog:lm-sre-shadow.yml","Lm Sre Shadow","en-us/blog/lm-sre-shadow.yml","en-us/blog/lm-sre-shadow",{"_path":6161,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6162,"content":6168,"config":6173,"_id":6175,"_type":14,"title":6176,"_source":16,"_file":6177,"_stem":6178,"_extension":19},"/en-us/blog/look-back-on-11-11-cicd",{"title":6163,"description":6164,"ogTitle":6163,"ogDescription":6164,"noIndex":6,"ogImage":6165,"ogUrl":6166,"ogSiteName":670,"ogType":671,"canonicalUrls":6166,"schema":6167},"Looking back on the 11.x releases for GitLab CI/CD","With GitLab 12.0 coming soon, it's a great time to reflect on all the features we've launched since 11.0.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666857/Blog/Hero%20Images/photo-cicdlookback.jpg","https://about.gitlab.com/blog/look-back-on-11-11-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Looking back on the 11.x releases for GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-06-19\",\n      
}",{"title":6163,"description":6164,"authors":6169,"heroImage":6165,"date":6170,"body":6171,"category":791,"tags":6172},[912],"2019-06-19","\nGitLab releases each month, so if you aren't paying close attention it can be easy to\nlose track of all the great features that are coming out. With an eye towards [CI/CD](/solutions/continuous-integration/)\nin particular, I'd like to take you through some of the highlights in each of our 11.x releases,\neach of which contributed to our strategy around cloud native CI/CD that has\nsecurity and smarts built right in, supports code reusability and live troubleshooting,\nand in general enables your team to make progress towards your goal of better, more\nreliable software delivery.\n\n![Release Badges](https://about.gitlab.com/images/blogimages/11x_release_logos.png){: .shadow.medium.center}\n\nFor those who don't know me, I'm the director of product for CI/CD and I've spent\nmy career (going all the way back to doing build automation of Windows 98 at my\nfirst corporate job) out of doing build and release automation and process. I love\nthis stuff, and my career move from building CI/CD implementations to building\nCI/CD tools for folks just like me has been one of the most rewarding things I've\ndone in my life. I hope that experience and passion comes through in the features\nwe've delivered – either way, I'd love to chat with you if you're a user of GitLab\nCI/CD. 
DM me on [Twitter](https://twitter.com/j4yav) or contact me via my [GitLab profile](https://gitlab.com/jyavorska) if you'd like to chat.\n\nAnyway, without further ado let's dive into the first 11.x release!\n\n## [GitLab 11.0](/releases/2018/06/22/gitlab-11-0-released/)\n\n### Auto DevOps Generally Available\n\nWe kicked off the 11.0 series in June 2018 by launching [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/).\nBeyond making it easy to host and collaborate on public and private repositories,\nGitLab also simplifies the rest of the process by offering the whole delivery toolchain,\nbuilt in and automated: Simply commit your code and Auto DevOps can do the rest.\nAuto DevOps is a pre-built, fully featured CI/CD pipeline that takes the best of\nGitLab CI/CD features, adds a lot of smarts around auto-detecting what's in your\nproject, and automates the entire delivery process to your Kubernetes cluster.\n\nCheck out our [quick-start guide](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html)\nif you haven't had a chance to play with it yet – you might be surprised what it's\ncapable of out of the box.\n\n![Auto DevOps](https://about.gitlab.com/images/11_0/auto-devops.png){: .shadow.medium.center}\n\n### Job logs in the Web IDE\n\nTying operational deployments/execution together with development is also a priority\nfor GitLab. 
In 11.0 we made the CI status of the current commit available in the status\nbar of the Web IDE, and made it possible to view the [status and the logs for each job on the right](https://docs.gitlab.com/ee/user/project/web_ide/#view-ci-job-logs).\nThis made it easy to fix a merge request with CI failures by opening the failed job\nright alongside your code.\n\n![Web IDE trace](https://about.gitlab.com/images/11_0/web_ide_ci_trace.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [CI/CD pipeline jobs integrated with the Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/#view-ci-job-logs)\n- [Variable-defined deployment policies for Canary deployments](https://docs.gitlab.com/ee/topics/autodevops/#deploy-policy-for-canary-environments)\n- [Specify deployment strategy from Auto DevOps settings](https://docs.gitlab.com/ee/topics/autodevops/#auto-deploy)\n\n---\n\n## [GitLab 11.1](/releases/2018/07/22/gitlab-11-1-released/)\n\n### Security reports in pipeline view\n\nSecurity was another important area of focus for us throughout the 11.x series. We\nalready had security reports in the MR before this release, but here we also\nadded status for branches so this information can be acted upon even earlier.\nGitLab 11.1 (July 2018) completed the [set of security reports shown in the pipeline view](https://docs.gitlab.com/ee/user/project/merge_requests/#security-reports),\nadding both Container Scanning and DAST. 
From there you could now simply review\nthe Reports tab to access all security information and take action.\n\n![Security Reports](https://about.gitlab.com/images/11_1/security_reports.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Redesign of the merge request and pipeline info sections](https://docs.gitlab.com/ee/user/project/merge_requests/)\n- [Improved Kubernetes cluster page design](https://docs.gitlab.com/ee/user/project/clusters/)\n\n---\n\n## [GitLab 11.2](/releases/2018/08/22/gitlab-11-2-released/)\n\n### Custom templates at the instance level\n\nIn 11.2 (August 2018) we also introduced [custom templates at the instance level](https://docs.gitlab.com/ee/administration/custom_project_templates.html),\nmaking it easy for organizations to set up a basic template for how they want\ntheir CI/CD pipelines to run. Development teams can grab a copy of the template\nand go, confident their following their organizational processes. Our enterprise\ncustomers are very important to us, and this feature came directly from the great\nfeedback we get from our customers.\n\n![Project Templates](https://about.gitlab.com/images/11_2/project-templates-instance.png){: .shadow.medium.center}\n\n### Kaniko for Docker Builds\n\nHistorically, building Docker images within a containerized environment had\nrequired compromises, using techniques like docker-in-docker on privileged\ncontainers. These solutions were often insecure and slow. In this release we\nmade the Runner compatible with [Kaniko](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html),\na new tool developed by Google which is able to securely build an image within\nan unprivileged container. 
Cloud-first build technology is so important for the\njourney we want to take with our users, and supporting these kinds of foundational\ntechnologies that make your life easier are so nice to deliver.\n\n![Kaniko](https://about.gitlab.com/images/11_2/kaniko.png){: .shadow.medium.center}\n\n### JUnit test results in merge requests\n\nFinally, testing will always be an important part of any CI/CD pipeline. With the 11.2 release,\nwe made it possible to [see JUnit test results directly](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html)\nright from the CI view in the merge request widget, as part of our ongoing efforts\nto invest in full-spectrum integrated testing within GitLab.\n\n![JUnit Results](https://about.gitlab.com/images/feature_page/screenshots/junit-test-summaries-MR-widget.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [GitLab Runner in cloud native Helm Chart](https://docs.gitlab.com/charts/)\n- [Built-in project templates switched to use Dockerfiles](https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-project)\n- [Manually stop an environment](https://docs.gitlab.com/ee/ci/environments/index.html#stopping-an-environment)\n\n---\n\n## [GitLab 11.3](/releases/2018/09/22/gitlab-11-3-released/)\n\n### Built-in Maven package repository\n\nFor any development organization, having an easy and secure way to manage\ndependencies is critical. Package management tools, such as Maven for Java\ndevelopers, provide a standardized way to share and version control these\nlibraries across projects. 
In GitLab 11.3 (September 2018), we opened up [Maven repositories built directly into GitLab](https://docs.gitlab.com/ee/user/packages/maven_repository/index.html).\nJava developers were now easily able to publish their packaged libraries to\ntheir project’s Maven repository: Just share a simple XML snippet with\nother teams looking to utilize that library, and Maven and GitLab will take care\nof the rest.\n\n![Maven Repo](https://about.gitlab.com/images/11_3/maven.png){: .shadow.medium.center}\n\n### Interactive Web Terminals\n\nCI/CD jobs are executed in the runner as part of pipelines, but this execution wasn't interactive.\nWhen they failed, it wasn't always easy to dig into details to spot the source of the problem.\n[Interactive web terminals](https://docs.gitlab.com/ee/ci/interactive_web_terminal/)\nbrought the capability to connect to a running or completed job and manually enter\ncommands to understand what’s happening in the system, and helped us move the story\nforward on empowering teams to deliver code, troubleshoot, and solve issues directly.\n\n![Web Terminal](https://about.gitlab.com/images/11_3/verify-webterm.png){: .shadow.medium.center}\n\n### Better includes with `extends` keyword\n\nReusing CI/CD code is a great way to help ensure consistency in software delivery,\nand also minimizes the amount of per-job scripting that’s needed to write and\nmaintain. 
As of 11.11, we began offering a powerful alternative approach\nfor code reuse in templates using [YAML `extends` keywords](https://docs.gitlab.com/ee/ci/yaml/#extends),\nexpanding upon our vision for reusability and compliance in the enterprise.\n\n![Extends](https://about.gitlab.com/images/11_3/verify-includes.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)\n- [Auto DevOps enabled by default](https://docs.gitlab.com/ee/topics/autodevops/)\n- [Custom file templates for self-managed instances](https://docs.gitlab.com/ee/administration/settings/instance_template_repository.html)\n\n---\n\n## [GitLab 11.4](/releases/2018/10/22/gitlab-11-4-released/)\n\n### Feature Flags\n\nFeature Flags are a no-brainer to make software deliver easier, so you knew we'd eventually\nwant to include them in the GitLab single application. With the 11.4 release (October 2018) we delivered on\nthis promise by adding [Feature Flags](https://docs.gitlab.com/ee/operations/feature_flags.html),\nhelping teams to achieve continuous delivery by offering better options for incrementally\nrolling out changes and separating feature delivery from customer launch.\n\n![Feature Flags](https://about.gitlab.com/images/11_4/feature_flags.png){: .shadow.medium.center}\n\n### `only/except` rules for changes to files\n\nA very popular requested feature, in 11.4 we added the ability within the\n`.gitlab-ci.yml` to [use `only`/`except` rules for jobs](https://docs.gitlab.com/ee/ci/yaml/#only--except)\nbased on when modifications occur to a specific file or path (or glob). 
This allowed\nfor even more smarts in the pipeline, especially for monorepo/microservice-type\nuse cases, where the pipeline behavior can be optimized based on the changed files\nin the repository.\n\n![Only Except Changes](https://about.gitlab.com/images/11_4/verify-onlyexceptchanges.png){: .shadow.medium.center}\n\n### Timed incremental rollouts\n\nTeams already had the ability within Auto DevOps to set up incremental rollouts,\nbut with this release we added an option to also set up [timed incremental rollouts](https://docs.gitlab.com/ee/topics/autodevops/#timed-incremental-rollout-to-production)\nwhere the rollout will automatically continue forward on a timed cadence, making\nsure there is no error before continuing. This helped us push our vision for safe,\ncontinous deployment forward by providing teams with a new tool to have control over\ntheir code rollouts.\n\n![Timed Incremental Rollouts](https://about.gitlab.com/images/11_4/timed_incremental_rollouts.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Moving `includes` from Starter to Core](https://docs.gitlab.com/ee/ci/yaml/#include)\n- [Auto DevOps support for RBAC](https://docs.gitlab.com/ee/topics/autodevops/)\n- [Filter admin runners view by type/state](https://docs.gitlab.com/ee/ci/runners/)\n- [Support for interactive web terminals with Docker executor](https://docs.gitlab.com/ee/ci/interactive_web_terminal/)\n- [Delayed jobs for pipelines](https://docs.gitlab.com/ee/ci/yaml/#whendelayed)\n\n---\n\n## [GitLab 11.5](/releases/2018/11/22/gitlab-11-5-released/)\n\n### Access control for Pages\n\nWith the 11.5 release (November 2018) we delivered a fantastic community-contributed feature which enabled\naccess control for Pages. 
From now on, instead of only supporting use cases where the\ncontent associated with the product is public, you could use Pages to build and\npublish protected content that should [only be accessible by project members](https://docs.gitlab.com/ee/user/project/pages/introduction.html#gitlab-pages-access-control).\nOperational documentation, internal secrets, or even just private planning or\nother information can now be confidently published via your pipelines in an\neasy-to-access way, with confidence that only the right people are able to see it.\n\n![Access Control Pages](https://about.gitlab.com/images/11_5/access-control-pages.png){: .shadow.medium.center}\n\n### Deploy Knative to your Kubernetes cluster\n\nBuilding [serverless applications](/topics/serverless/) enables teams to focus their time on making a\ngreat product and eliminates the need of provisioning, managing, and operating\nservers. Starting in GitLab 11.5, we enabled [deploying Knative to your existing Kubernetes cluster](https://docs.gitlab.com/ee/update/removals.html)\nwith a single click using the GitLab Kubernetes integration. Knative is a\nKubernetes-based platform to build, deploy, and manage modern serverless workloads.\nTasks that were once difficult, such as source-to-container builds, routing and\nmanaging traffic, and scaling-to-zero, now work effortlessly out of the box.\n\n![KNative](https://about.gitlab.com/images/11_5/knative.png){: .shadow.medium.center}\n\n### Parallel attribute for faster pipelines\n\nThe speed to delivery in a CI/CD environment can oftentimes be limited by the time it takes to complete the various tests in order to ensure the code is able to be shipped. 
With the `parallel` keyword in GitLab CI/CD, teams can quickly and easily parallelize these tests – accelerating the testing process and overall time to delivery.\n\n![Parallel](https://about.gitlab.com/images/11_5/parallel-keyword.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Review Apps can now link directly to changed pages](https://docs.gitlab.com/ee/ci/environments/index.html#going-from-source-files-to-public-pages)\n- [New CI/CD syntax for security, quality, and performance report types](https://docs.gitlab.com/ee/ci/yaml/#artifactsreports)\n- [Additional information about deployments in merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/index.html#pipeline-status-in-merge-requests)\n\n---\n\n## [GitLab 11.6](/releases/2018/12/22/gitlab-11-6-released/)\n\n### GitLab Serverless\n\nBuilding on the Knative integration introduced in the previous month, 11.6's new, more\ncomprehensive [Serverless](https://docs.gitlab.com/ee/update/removals.html)\ncapability enabled users to easily define functions in their repository and have\nthem served and managed by Knative. Cloud native is such an important part of our\nroadmap, and it was really exciting to launch this feature while I was at KubeCon\nno less.\n\nBy simply defining your function data in the repo’s `serverless.yml` file and\nusing a `.gitlab-ci.yml` template, each function will be deployed to your cluster,\nwith Knative taking care of scaling your function based on request volume. This\nenables application developers to iterate quickly without having to worry about\nprovisioning or managing infrastructure.\n\n![Serverless](https://about.gitlab.com/images/11_6/serverless.png){: .shadow.medium.center}\n\n### Run pipeline jobs for merge requests\n\nRunning a given job only when dealing with a merge request was made much easier in 11.6. 
Using the\n`merge_requests` value with `only/except` keywords will allow you to configure jobs\nto run [only or except when in the context of a merge request](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html).\nThis allows finer control over pipeline behavior, and also provides access to new\nenvironment variables indicating the target branch and merge request ID to be used\nfor additional automated behaviors.\n\n![Merge Request Pipelines](https://about.gitlab.com/images/11_6/verify-mergerequestpipelines.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Kubernetes clusters for groups](https://docs.gitlab.com/ee/user/group/clusters/)\n- [Pipelines are now deletable via API](https://docs.gitlab.com/ee/api/pipelines.html#delete-a-pipeline)\n- [Trigger variables are now hidden in UI by default](https://docs.gitlab.com/ee/ci/triggers/)\n\n---\n\n## [GitLab 11.7](/releases/2019/01/22/gitlab-11-7-released/)\n\n### Releases page\n\nThe 11.7 release (January 2019) added the ability to [create releases in GitLab](https://docs.gitlab.com/ee/user/project/releases/index.html)\nand view them on a summary page. Releases are a snapshot in time of the source,\nlinks, and other metadata or artifacts associated with a released version of your\ncode, and helps users of your project to easily discover the latest releases\nof your software.\n\nThis is a feature that was, as a career release manager, near and dear to my heart.\nI have so many plans around [Release Orchestration](/direction/release/release_orchestration/)\nthat build on this feature as a foundation. Being able to tie a milestone to\na release, a feature coming very soon, will open the door to tying together all\nkinds of interesting things happening in GitLab to a release. 
This isn't my forward-looking\nblog post so I won't go too far here, but I'll just say I can't wait to\ngo on that journey to build something really unique and powerful together with our users.\n\n![Releases Page](https://about.gitlab.com/images/11_7/release-releases_page.png){: .shadow.medium.center}\n\n### Expand upstream/downstream pipelines across projects\n\nWith 11.7 it became possible to [expand upstream or downstream cross-project pipelines](https://docs.gitlab.com/ee/ci/pipelines/index.html#visualize-pipelines)\nright from the pipeline view, giving you visibility into your end-to-end pipelines,\nno matter in which project they start or finish. It's one pattern we've been seeing\nmore and more of in GitLab, and we're adding more features to support. The reality of\ncontinuous delivery is complex orchestration across projects and even groups, so\nthis is a feature that was nice to get out the door to help make this easier.\n\n![Cross-Project Pipelines](https://about.gitlab.com/images/11_7/release-pipeline_expansion.png){: .shadow.medium.center}\n\n### NPM package repository\n\nIn January we also started offering [NPM registries](https://docs.gitlab.com/ee/user/packages/npm_registry/index.html)\nbuilt directly into GitLab. From this point teams can share a simple package-naming\nconvention to utilize that library in any Node.js project, and NPM and GitLab will\ndo the rest – all from a single, easy-to-use interface. 
Yet another step on our path\nto enable all kinds of repositories, built right into GitLab when you need them.\n\n![NPM Packages](https://about.gitlab.com/images/11_7/npm_package_view.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Ability to configure Kubernetes app secrets as variables in Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/#application-secret-variables)\n- [API support for Kubernetes integration](https://docs.gitlab.com/ee/api/project_clusters.html)\n- [Short commit SHA available as environment variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\n- [Authorization support for fetching includes](https://docs.gitlab.com/ee/ci/yaml/#include)\n- [Skip CI builds during git push with `skip_ci` keyword](https://docs.gitlab.com/ee/ci/pipelines/#skip-a-pipeline)\n\n---\n\n## [GitLab 11.8](/releases/2019/02/22/gitlab-11-8-released/)\n\n### `trigger:` keyword for pipelines\n\nEven as of GitLab 9.3 you were able to create multi-project pipelines by triggering\na downstream pipeline via a GitLab API call in your job. In GitLab 11.8 (February 2019), we added\nfirst-class support for triggering these downstream pipelines with the [`trigger:`](https://docs.gitlab.com/ee/ci/yaml/#trigger)\nkeyword, instead of requiring teams to make an API call to trigger the downstream\npipeline. A bit more for those cross-project use cases that makes everything just\na little bit nicer to use.\n\n![Trigger](https://about.gitlab.com/images/11_8/multi_project_pipeline_graph.png){: .shadow.medium.center}\n\n### Pages support for subgroups\n\nPages was updated in 11.8 to [work with subgroups in GitLab](https://docs.gitlab.com/ee/administration/pages/),\ngiving you the ability to create Pages sites at that level as well. 
Sites set up in this\nway will have a URL in the format of `toplevel-group.gitlab.io/subgroup/project`,\nmaking them very easy to find.\n\n![Pages for SubGroups](https://about.gitlab.com/images/11_8/release-pages-subgroups.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Several new templates for getting started quickly with GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/#getting-started)\n- [Auto DevOps support for environment-specific custom domain](https://docs.gitlab.com/ee/topics/autodevops/#environment-variables)\n- [Feature Flags was improved by making them environment-aware](https://docs.gitlab.com/ee/operations/feature_flags.html#define-environment-specs)\n- [CI_PAGES and CI_PAGES_URL added as helpful variables accessible to Pages pipelines](https://docs.gitlab.com/ee/user/project/pages/)\n- [.html extensions are now automatically resolved for Pages sites](https://docs.gitlab.com/ee/user/project/pages/)\n- [Tolerations were added to the Kubernetes executor](https://docs.gitlab.com/runner/executors/kubernetes.html#the-keywords)\n- [A new cleanup procedure for the Container Registry](https://docs.gitlab.com/ee/api/container_registry.html#delete-a-repository-tag)\n- [Force redeploy when Auto DevOps secrets are updated](https://docs.gitlab.com/ee/topics/autodevops/#environment-variables)\n\n---\n\n## [GitLab 11.9](/releases/2019/03/22/gitlab-11-9-released/)\n\n### Feature Flag auditability\n\nWith the 11.9 release (March 2019), operations like adding, removing, or changing Feature Flags\nare now [recorded in the GitLab audit log](https://docs.gitlab.com/ee/administration/audit_events.html),\ngiving you visibility into what is changing and when. If you’re having an incident\nand need to see what changed recently, or just need to look back as an auditor on\nhow your feature flags have been modified, this is now very easy to do. 
We have\nbig plans for Feature Flags, and also compliance built right into your pipelines.\nIt was great to knock out a two-for-one with this one.\n\n![Feature Flag audit events](https://about.gitlab.com/images/11_9/release-ffaudit.png){: .shadow.medium.center}\n\n### Security templates for pipelines\n\nGitLab security features evolve very fast, and they always need to be up to\ndate to be effective and protect your code. We know that changing the job\ndefinition is difficult if you have to manage multiple projects. As of this release we\ninclude bundled security templates [directly into your configuration](https://docs.gitlab.com/ee/user/application_security/sast/#configuring-sast),\nand have them updated with your system every time you upgrade to a new version of\nGitLab, without any change to any pipeline configuration required. Security plus\nreusability, a great combination.\n\n![Security Templates](https://about.gitlab.com/images/11_9/templates.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Project templates for .NET, Go, iOS, and Pages](https://docs.gitlab.com/ee/user/project/working_with_projects.html#built-in-templates)\n- [Run specific jobs on merge requests only when files change](https://docs.gitlab.com/ee/ci/jobs/job_control.html#use-onlychanges-with-merge-request-pipelines)\n- [Auto DevOps build jobs for tags](https://docs.gitlab.com/ee/topics/autodevops/#auto-build)\n\n---\n\n## [GitLab 11.10](/releases/2019/04/22/gitlab-11-10-released/)\n\n### Pipeline dashboard\n\nIn 11.10 (April 2019) we added [pipeline status information to the Operations Dashboard](https://docs.gitlab.com/ee/user/operations_dashboard/).\nThis helps teams view the pipeline health of all the projects that they care about,\nall together in a single interface. Yet another step towards making pipelines across\nyour instance easy to understand and follow, this one was built in real-time coordination\nwith a customer, which is always a nice way to get something done. 
You get to build\nsomething that solves a real problem and collaborate directly with the folks who\nneed it.\n\n![Pipeline Dashboard](https://about.gitlab.com/images/11_10/cross-project-pipelines-dashboard.gif){: .shadow.medium.center}\n\n### Pipelines on merge results\n\nWhen working in a feature branch, it’s normal to have it diverge over\ntime from the target branch if you aren’t rebasing frequently. This can result\nin a situation where both the source and target branch’s pipelines are green and\nthere are no merge conflicts, but the combined output will result in a failed\npipeline due to an incompatibility between the changes.\n\nWith 11.10 it became possible for a pipeline to automatically create a new ref that\ncontains the combined merge result of the source and target branch, then\n[run the pipeline against that ref](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html)\n(what we call an `attached` state). In this way, GitLab can help teams keep their\nmaster branch green even when they have many teams merging into the release branch.\n\nTools and techniques built right into GitLab for keeping master green was a big\nfocus in the last few releases of 11.x, and will remain so for 12.x as well. Look\nfor [merge trains](https://gitlab.com/gitlab-org/gitlab-ee/issues/9186) to be built\non top of this foundation, and some really cool enhancements around sequencing and\nparallelization of them.\n\n![Merge Ref Pipeline](https://about.gitlab.com/images/11_10/merge_request_pipeline.png){: .shadow.medium.center}\n\n### Composable Auto DevOps\n\nAuto DevOps enables teams to adopt modern DevOps practices with little to no effort.\nStarting in GitLab 11.10 each job of Auto DevOps was made available as an\nindependent template. 
Using the includes feature of GitLab CI, users can [choose to bring in\nonly certain stages of Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/#using-components-of-auto-devops) while continuing to use their own custom\n`gitlab-ci.yml` for the rest. This helps teams to use just the desired jobs, while\ntaking advantage of any updates made upstream.\n\n![Composable Auto DevOps](https://about.gitlab.com/images/11_10/composable-auto-devops.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [More thorough Container Registry cleanup](https://docs.gitlab.com/omnibus/maintenance/#removing-unused-layers-not-referenced-by-manifests)\n- [Ability to purchase CI add-on runner minutes](https://docs.gitlab.com/ee/administration/settings/continuous_integration.html#extra-shared-runners-pipeline-minutes-quota)\n- [Change the cloning path for pipelines](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#custom-build-directories)\n- [Simple masking of protected variables in logs](https://docs.gitlab.com/ee/ci/variables/#masked-variables)\n- [Enable/disable Auto DevOps at the group level](https://docs.gitlab.com/ee/topics/autodevops/#enablingdisabling-auto-devops-at-the-group-level)\n- [Group-level runners for group-level clusters](https://docs.gitlab.com/ee/user/group/clusters/#installing-applications)\n- [Control over `git clean` flags in pipeline jobs](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#git-clean-flags)\n\n---\n\n## [GitLab 11.11](/releases/2019/05/22/gitlab-11-11-released/)\n\n### Windows Container Executor\n\nIn GitLab 11.11 (May 2019) we were very pleased to add a new executor to the GitLab Runner\nfor using [Docker containers on Windows](https://docs.gitlab.com/runner/executors/docker.html#using-windows-containers).\nPreviously, using the shell executor to orchestrate Docker commands was the primary\napproach for Windows, but with this update you are now able to use Docker\ncontainers on Windows directly, in much the same 
way as if they were on Linux\nhosts. This opened up the door for more advanced kinds of pipeline orchestration\nand management for our users of Microsoft platforms.\n\nAlso included with this update was improved support for PowerShell throughout GitLab\nCI/CD, as well as new helper images for various versions of Windows containers.\n\n![Windows Executor](https://about.gitlab.com/images/11_11/windows-container.png){: .shadow.medium.center}\n\n### Caching proxy for Container Registry\n\nLots of teams are using containers as part of their build pipelines, and our new\n[caching proxy](https://docs.gitlab.com/ee/user/packages/dependency_proxy/index.html) for\nfrequently used upstream images/packages introduced a great way to speed them up.\nBy keeping a copy of needed layers locally using the new caching proxy, you can\neasily improve execution performance for the commonly used images in your environment.\n\n![Dependency Proxy](https://about.gitlab.com/images/11_11/dependency-proxy-mvc.png){: .shadow.medium.center}\n\n### Chat notifications for deployments\n\nIn 11.11 deployment events were available to be [automatically shared in your team’s channel](https://docs.gitlab.com/ee/user/project/integrations/)\nthrough our Slack and Mattermost chat integrations, helping bring visibility to\nthese important activities that your teams need to be aware of.\n\n![Notifications](https://about.gitlab.com/images/11_11/release-slack-notification.png){: .shadow.medium.center}\n\n### Guest Access for Releases\n\nIt also became possible in this release for [guest users of your projects to view releases](https://docs.gitlab.com/ee/user/permissions.html#releases-permissions)\nthat you have published on the Releases page. 
They will be able to download your\npublished artifacts, but are prevented from downloading the source code or seeing\nrepository information such as tags and commits.\n\n![Guest Releases](https://about.gitlab.com/images/11_7/release-releases_page.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Add-on runner minutes extended to free plans](https://docs.gitlab.com/ee/administration/settings/continuous_integration.html#extra-shared-runners-pipeline-minutes-quota)\n- [Access deployment details through environments API](https://docs.gitlab.com/ee/api/environments.html#get-a-specific-environment)\n- [Create a file directly from environment variable](https://docs.gitlab.com/ee/ci/variables/#variable-types)\n- [Run all manual jobs for a stage in one click](https://docs.gitlab.com/ee/ci/pipelines/index.html#add-manual-interaction-to-your-pipeline)\n\n---\n\n## In conclusion\n\nPhew... that was a lot of great features, and the team here at GitLab is really proud of\nwhat we delivered with this series of GitLab releases. I hope you found something\nthat you can take advantage of in your own CI/CD process. If you're interested in\nseeing where we're heading next, head over to our [CI/CD strategy page](/direction/ops/)\nand check out what's coming. Also, be sure to check out our 12.0 release post coming out on the 22nd of this month.\n\nOne of the things you may have noticed is that we frequently add new iterations\non our features, even month to month. We have a lot more iterations planned, both\nfor new and existing features, but what would you like to see in the next\nversion of your favorite feature? 
We'd love to hear – let us know in the\ncomments below.\n\nPhoto by [Zoltan Tasi](https://unsplash.com/photos/O_mBXldZ0hc?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/arrow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,109,916],{"slug":6174,"featured":6,"template":686},"look-back-on-11-11-cicd","content:en-us:blog:look-back-on-11-11-cicd.yml","Look Back On 11 11 Cicd","en-us/blog/look-back-on-11-11-cicd.yml","en-us/blog/look-back-on-11-11-cicd",{"_path":6180,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6181,"content":6187,"config":6192,"_id":6194,"_type":14,"title":6195,"_source":16,"_file":6196,"_stem":6197,"_extension":19},"/en-us/blog/machine-learning-on-the-gitlab-devops-platform",{"title":6182,"description":6183,"ogTitle":6182,"ogDescription":6183,"noIndex":6,"ogImage":6184,"ogUrl":6185,"ogSiteName":670,"ogType":671,"canonicalUrls":6185,"schema":6186},"How Comet can streamline machine learning on The GitLab DevOps Platform","Here's a step-by-step look at how to bring ML into software development using Comet on GitLab's DevOps Platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669991/Blog/Hero%20Images/ways-to-encourage-collaboration.jpg","https://about.gitlab.com/blog/machine-learning-on-the-gitlab-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Comet can streamline machine learning on The GitLab DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2021-11-08\",\n      }",{"title":6182,"description":6183,"authors":6188,"heroImage":6184,"date":6189,"body":6190,"category":791,"tags":6191},[2141],"2021-11-08","\n\nBuilding machine learning-powered applications comes with numerous challenges. 
When we talk about these challenges, there is a tendency to overly focus on problems related to the quality of a model’s predictions—things like data drift, changes in model architectures, or inference latency. \n\nWhile these are all problems worthy of deep consideration, an often overlooked challenge in [ML development](/topics/devops/the-role-of-ai-in-devops/) is the process of integrating a model into an existing software application.  \n\nIf you’re tasked with adding an ML feature to a product, you will almost certainly run into an existing codebase that must play nicely with your model. This is, to put it mildly, not an easy task. \n\nML is a highly iterative discipline. Teams often make many changes to their codebase and pipelines in the process of developing a model. Coupling an ML codebase to an application’s dependencies, unit tests, and CI/CD pipelines will significantly reduce the velocity with which ML teams can deliver on a solution, since each change would require running these downstream dependencies before a merge can be approved.  \n\nIn this post, we’re going to demonstrate how you can use [Comet](https://www.comet.ml/site/) with [GitLab’s DevOps platform](/solutions/devops-platform/) to streamline the workflow for your ML and software engineering teams, allowing them to collaborate without getting in each other's way.      \n\n## The challenge for ML teams working with application teams\n\nLet’s say your team is working on improving a feature engineering pipeline. You will likely have to test many combinations of features with some baseline model for the task to see which combinations make an impact on model performance.     \n \nIt is hard to know beforehand which features might be significant, so having to run multiple experiments is inevitable. If your ML code is a part of your application codebase, this would mean having to run your application’s CI/CD pipeline for every feature combination you might be trying. 
\n\nThis will certainly frustrate your Engineering and DevOps teams, since you would be unnecessarily tying up system resources, given that software engineering teams do not need to run their pipelines with the same frequency as ML teams do.  \n\nThe other issue is that despite having to run numerous experiments, only a single set of outputs from these experiments will make it to your production application. Therefore, the rest of the assets produced through these experiments are not relevant to your application code.     \n\nKeeping these two codebases separated will make life a lot easier for everyone – but it also introduces the problem of syncing the latest model between two codebases.     \n\n## Use The GitLab DevOps Platform and Comet for your model development process\n\nWith The GitLab DevOps platform and Comet, we can keep the workflows between ML and engineering teams separated, while enabling cross-team collaboration by preserving the visibility and auditability of the entire model development process across teams.     \n\nWe will use two separate projects to demonstrate this process. One project will contain our application code for a handwritten digit recognizer, while the other will contain all the code relevant to training and evaluating our model.  \n\nWe will adopt a process where discussions, code reviews, and model performance metrics get automatically published and tracked within The GitLab DevOps Platform, increasing the velocity and opportunity for collaboration between data scientists and software engineers for machine learning workflows.\n\n## Project setup\n\nOur project consists of two projects: [comet-model-trainer](https://gitlab.com/tech-marketing/devops-platform/comet-model-trainer) and [ml-ui](https://gitlab.com/tech-marketing/devops-platform/canara-review-apps-testing). 
\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/cometmodeltrainer.png){: .shadow}\n\nThe **comet-model-trainer** repository contains scripts to train and evaluate a model on the MNIST dataset. We have set up The GitLab DevOps Platform in a way that runs the training and evaluation Pipeline whenever a new merge request is opened with the necessary changes.\n\nThe **ml-ui** repository contains the necessary code to build the frontend of our ML application.\n\nSince the code is integrated with Comet, your ML team can easily track the source code, hyperparameters, metrics, and other details related to the development of the model.  \n\nOnce the training and evaluation steps are completed, we can use Comet to fetch summary metrics from the project as well as metrics from the Candidate model and display them within the merge request; This will allow the ML team to easily review the changes to the model. \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/buildmodelgraph.png){: .shadow}\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/summarymetrics.png){: .shadow}\n\nIn our case, the average accuracy of the models in the project is 97%. Our Candidate model achieved an accuracy of 99%, so it looks like it is a good fit to promote to production. The metrics displayed here are completely configurable and can be changed as necessary.        \n\nWhen the merge request is approved, the deployment pipeline is triggered and the model is pushed to Comet’s Model Registry. The Model Registry versions each model and links it back to the Comet Experiment that produced it.  \n![Alt text for your image](https://about.gitlab.com/images/blogimages/OpenComet_SparkVideo.gif){: .shadow}    \n\nOnce the model is pushed to the Model Registry, it is available to the application code. 
When the application team wishes to deploy this new version of the model to their app, they simply have to trigger their specific deployment pipeline.     \n\n## Running the pipeline\n\n### Pipeline outline\n\nWe will run the process outlined below every time a team member creates a merge request to change code in the `build-neural-network`script:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/modelapprove.png){: .shadow}\n\nNow, let’s take a look at the yaml config used to define our CI/CD pipelines depicted in the previous diagram:\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/workflowsbranch.png){: .shadow}\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/script.png){: .shadow}\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/registermodel.png){: .shadow}\n\nLet's break down the CI/CD pipeline by describing the gitlab-ci.yml file so you can use it and customize it to your needs.\n\nWe start by instructing our GitLab runners to utilize Python:3.8 to run the jobs specified in the pipeline: \n\n`Image: python:3.8`\n\nThen, we define the job where we want to build and train the neural network:\n\n`Build-neural-network`\n\n### Build-neural-network \n\nIn this step, we start by creating a folder where we will store the artifacts generated by this job, install dependencies using the requirements.txt file, and finally  execute the corresponding Python script that will be in charge of training the neural network. The training runs in the GitLab runner using the Python image defined above, along with its dependencies.\n\nOnce the `build-neural-network` job has finalized successfully, we move to the next job: `write-report-mr`\n\nHere, we use another image created by DVC that will allow us to publish a report right in the merge request opened by the contributor who changed code in the neural network script. 
In this way, we’ve brought software development workflows to the development of ML applications. With the report provided by this job, code and model review can be executed within the merge request view, enabling teams to collaborate not only around the code but also the model performance.\n\nFrom the merge request page, we get access to loss curves and other relevant performance metrics from the model we are training, along with a link to the Comet Experiment UI, where richer details are provided to evaluate the model performance. These details include interactive charts for model metrics, the model hyperparameters, and Confusion Matrices of the test set performance, to name a few. \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/manualDeploy_SparkVideo.gif){: .shadow}\n\nWhen the team is done with the code and model review,  the merge request gets approved, and the script that generated the model is merged into the main codebase, along with its respective commit and the CI pipeline associated to it. This takes us to the next job: \n\n### Register-model\n\nThis job uses an integration between GitLab and Comet to upload the reviewed and accepted version of the model to the Comet Model Registry. If you recall, the Model Registry is where models intended for production can be logged and versioned. 
In order to run the commands that will register the model, we need to set up these variables: \n\n- COMET_WORKSPACE\n- COMET_PROJECT_NAME \n \nIn order to do that, follow the steps described [here](https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-an-instance).\n\nIt is worth noting that the `register-model` job only runs when the merge request gets reviewed and approved, and this behavior is obtained by setting `only: main` at the end of the job.\n\nFinally, we decide to let a team member have final control of the deployment so therefore we define a manual job:\n`Deploy-ml-ui`\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/deployuiml.png){: .shadow}\n\nWhen triggered, this job will import the model from Comet’s Model Registry and automatically create the necessary containers to build the user interface and deploy to a Kubernetes cluster. \n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/downstream.png){: .shadow}\n\nThis job triggers a downstream pipeline, which means that the UI for this MNIST application resides in a different project. This keeps the codebase for the UI and model training separated but integrated and connected at the moment of deploying the model to a production environment.\n\n![Alt text for your image](https://about.gitlab.com/images/blogimages/multipipeline_SparkVideo.gif){: .shadow}\n\n## Key takeaways\n\nIn this post, we addressed some of the challenges faced by ML and software teams when it comes to collaborating on delivering ML-powered applications. Some of these challenges include:\n\n* The discrepancy in the frequency with which each of these teams need to iterate on their codebases and CI/CD pipelines.\n\n* The fact that only a single set of experiment assets from an ML experimentation pipeline is relevant to the application.\n\n* The challenge of syncing a model or other experiment assets across independent codebases.   
\n\nUsing The GitLab DevOps Platform and Comet, we can start bridging the gap between ML and software engineering teams over the course of a project. \n\nBy having model performance metrics adopted into software development workflows like the one we saw in the issue and merge request, we can keep track of the code changes, discussions, experiments, and models created in the process. All the operations executed by the team are recorded, can be audited, are end-to end-traceable, and (most importantly) reproducible. \n\nWatch a demo of this process:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/W_DsNl5aAVk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n_About Comet:_\nComet is an MLOps Platform that is designed to help data scientists and teams build better models faster! Comet provides tooling to Track, Explain, Manage, and Monitor your models in a single place! 
\n\nLearn more about Comet [here](https://www.comet.ml/site/) and get started for free!\n\n\n\n",[9,1731,231,1181],{"slug":6193,"featured":6,"template":686},"machine-learning-on-the-gitlab-devops-platform","content:en-us:blog:machine-learning-on-the-gitlab-devops-platform.yml","Machine Learning On The Gitlab Devops Platform","en-us/blog/machine-learning-on-the-gitlab-devops-platform.yml","en-us/blog/machine-learning-on-the-gitlab-devops-platform",{"_path":6199,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6200,"content":6206,"config":6211,"_id":6213,"_type":14,"title":6214,"_source":16,"_file":6215,"_stem":6216,"_extension":19},"/en-us/blog/making-builds-faster-autoscaling-runners",{"title":6201,"description":6202,"ogTitle":6201,"ogDescription":6202,"noIndex":6,"ogImage":6203,"ogUrl":6204,"ogSiteName":670,"ogType":671,"canonicalUrls":6204,"schema":6205},"How to make builds faster","How GitLab uses autoscaling to reduce build times and make developers happy.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673173/Blog/Hero%20Images/autoscaling-balance.jpg","https://about.gitlab.com/blog/making-builds-faster-autoscaling-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make builds faster\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-08-21\",\n      }",{"title":6201,"description":6202,"authors":6207,"heroImage":6203,"date":6208,"body":6209,"category":679,"tags":6210},[788],"2019-08-21","\nPicture this: It’s 5:30 pm on a Friday and a project manager has an urgent request. A\nbug is affecting a group of customers and it needs to be fixed ASAP. You find the discrepancy\nand, _phew_, it looks like it’s going to be a relatively easy fix. You make the update and\nstart the CI pipeline… and then you wait… and wait. Two hours later, you’re still waiting. 
What was\nsupposed to be a quick fix has turned into another long night sitting in a queue.\n\n[The team at Ticketmaster](/blog/continuous-integration-ticketmaster/) certainly felt the\npain with their Jenkins pipelines, and many [DevOps](/topics/devops/) teams are all too familiar with sluggish CI.\n\nSlow builds hinder development speed. Plus – they’re annoying. It’s just one more thing developers\nhave to deal with in order to do their jobs. Organizations might dedicate more servers to process\nthese builds in an effort to solve the problem, but often that creates more problems. More servers\nmean higher cloud and computing costs. When it comes to long builds, many developers have\nresigned themselves to just “grin and bear it.”\n\n## Making builds _faster_\n\n[Continuous integration](/solutions/continuous-integration/) allows you to run a number of tasks as you\nprepare to deploy your software, like building a software package or running tests. These tasks\nneed to be run by something. At GitLab we call these task enablers runners, though other [CI tools](/solutions/continuous-integration/) call them\nagents. Runners are an application that processes builds: If all of these runners are in use, work\nis queued until one becomes available. Let's say your peak usage is 100 jobs, but your average\nusage is around 25 jobs. You have to decide how many servers to provision. If you go with the\naverage, you will have to wait during peak usage times. So why not just add more runners? Some\nservices actually charge for each of these virtual machines, and if you’re not using them all\nthe time, those costs can add up. If you're on a cloud infrastructure, you're paying for that\nserver time – even when it's not doing anything.\n\nFor ops teams, it’s been a never-ending balancing act of having the right amount of runners\nfor the right amount of work. 
But tasks don’t happen in a vacuum – every team has slow times\nand busier times that are unpredictable.\n\nNobody likes waiting. With this universal truth in mind, we introduced autoscaling to GitLab Runners.\n\n## What are autoscaling runners?\n\nAutoscaling gives teams the ability to utilize resources in a more elastic and dynamic way. What\nthis means is that our runners can be configured so that machines are created _on demand_.\nThose machines, after the job is finished, can wait to run the next jobs or be removed automatically.\nYou can even specify the `IdleTime` of a server before it shuts off. Once runners are set up to\nautoscale, your infrastructure contains only enough capacity to handle the load.\n\nAutoscaling runners ensure builds can be processed more efficiently and you aren’t paying for\nmore machines than you need. Developers can focus on their code instead of worrying about\ntheir infrastructure environment, and ops teams no longer have to moonlight as soothsayers.\n\nThe only thing you need to take advantage of autoscaling is one GitLab instance and\none [GitLab Runner](https://docs.gitlab.com/runner#features) that can be installed for free.\nOur runner is written in Go and can run on any platform where you can build Go binaries\nincluding Linux, macOS, Windows, FreeBSD, and Docker.\n\nSee how the team at [Substrakt Health](https://substrakthealth.com/) set up an autoscaling\ncluster of GitLab CI/CD runners using Docker-Machine and AWS – and saved 90% on EC2 costs in the process.\n\n[Read their story.](/blog/autoscale-ci-runners/)\n{: .alert .alert-gitlab-purple .text-center}\n\nSpeed and efficiency are important cornerstones of effective DevOps, so waiting for builds has\nalways felt like a step backward. As everyone strives to deploy more software, it seems only right\nthat your architecture be up for the task. 
Autoscaling runners let DevOps teams focus on what\nthey do best: Deploying better, faster software (yes, even on a Friday).\n\nPhoto by [Austin Neill](https://unsplash.com/@arstyy?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[109,916,9],{"slug":6212,"featured":6,"template":686},"making-builds-faster-autoscaling-runners","content:en-us:blog:making-builds-faster-autoscaling-runners.yml","Making Builds Faster Autoscaling Runners","en-us/blog/making-builds-faster-autoscaling-runners.yml","en-us/blog/making-builds-faster-autoscaling-runners",{"_path":6218,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6219,"content":6225,"config":6231,"_id":6233,"_type":14,"title":6234,"_source":16,"_file":6235,"_stem":6236,"_extension":19},"/en-us/blog/making-remote-work-better",{"title":6220,"description":6221,"ogTitle":6220,"ogDescription":6221,"noIndex":6,"ogImage":6222,"ogUrl":6223,"ogSiteName":670,"ogType":671,"canonicalUrls":6223,"schema":6224},"Tangram Vision engineers succeed at remote work with GitLab","The start-up's developers can collaborate efficiently, handling everything from merge requests to code reviews, and providing a single source of the truth.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668018/Blog/Hero%20Images/allremote.jpg","https://about.gitlab.com/blog/making-remote-work-better","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's DevOps platform enables Tangram Vision's engineering team to succeed at remote work\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lauren Gibbons Paul\"}],\n        \"datePublished\": \"2022-04-21\",\n      }",{"title":6226,"description":6221,"authors":6227,"heroImage":6222,"date":6228,"body":6229,"category":769,"tags":6230},"GitLab's DevOps platform enables Tangram Vision's engineering team to succeed at remote 
work",[1707],"2022-04-21","\n\nOn March 14, 2020, Tangram Vision CEO Brandon Minor flew from Colorado into the Bay Area to meet with COO Adam Rodnitzky. The two had just launched [Tangram Vision](https://www.tangramvision.com/), the company they co-founded to make sensors simpler for robotics, drones, and autonomous vehicles. Their plan was to, each month, alternate working at each other's location. However, that week, the Covid-19 pandemic lockdown began, forcing them to scrap that plan and figure out how to successfully collaborate from afar.\n\n“We didn’t see each other in person again for a very long time. That kicked off our remote work experience,” Minor says.\n\nThe Tangram Vision engineering team started using GitLab's DevOps platform, which enabled them to work together without missing a beat. “GitLab was a key tool that allowed us to work really fluidly in a remote context,” says Minor. “Our engineering team has placed GitLab at the core of our remote workflow because it reinforces our values and perspectives around working well remotely.”\n\nThe Tangram Vision Platform takes care of complex perception tasks like sensor fusion, calibration, and diagnostics built on a scalable data backend that allows engineers to track, optimize, and analyze every sensor in their fleet. Tangram Vision’s SDK includes tools for rapid sensor integration, multi-sensor calibration, and sensor stability, saving robotics engineers months of engineering time.\n\n## Supporting complex collaboration\n\nPerception systems are notoriously hard to get up and running and then maintain over time because of important lower-level activities like sensor integration and calibration. “We make sure all the sensors' data is running smoothly, everything's working together perfectly to basically a plug-and-play level. And then we enable the developers working on top of that to monitor and correct their system over time,” Minor says. 
\n\nTangram Vision has just launched a user hub that functions as a centralized sensor data center. The user hub joins their multi-sensor calibration module, as well as a multiplexing module that maintains stream reliability for all connected sensors. Developers can access a starter set of perception development tools (Tangram Vision Platform - Basic), which will be available on an open-source hub. Much of the initial user feedback will come through and be managed within repositories hosted on GitLab, both public and private, Minor says.\n\n## GitLab as a core for code\n\nThe engineering team has evaluated other platforms, according to Greg Schafer, senior web architect. “We’ve looked around but we've been very turned off by them for one reason or another. We really haven't swayed in wanting to use GitLab as our core for code,” Schafer says. \n\nThe team uses GitLab to manage branches and merge requests (MRs), boosting efficiency and control. “We were having a bit of a struggle early on managing the short-term flow. It was hard to put down tasks to paper. So, I dove deep into GitLab to see how it could help us there. And now that's what we use. GitLab is my product management tool,” Minor says.\n\nThe alternative, siphoning MRs into tools like Notion and Slack, would have been too cumbersome. “Having code-focused discussions in those places would've been very awkward vs. our current orientation of having those discussions in GitLab. Having that history of MRs and threads has been very useful,” Schafer says.\n\nDoing all of the code reviews in the MR itself builds a paper trail of documentation for the future. That means the team can look back at exactly when a change was introduced and find any discussion about potential trade-offs next to a change. This gives the engineers confidence in understanding the context behind a change months or years after it has been introduced. 
“It encourages team members to be able to work asynchronously, as that context is not held in any single individual’s head but instead written and made explicit,” Minor says.\n\n## A host of features and options in GitLab\n\nFor Rodnitzky, what stands out about GitLab is that it has a host of features and options in one place. “It’s not just hosting code and MRs and all those discussions and things around that, but also the [continuous integration/continuous delivery], having that tightly integrated is really helpful,” he says. For example, there are different types of reports that might show up on the MRs. GitLab makes it easy to reference different CI steps in the MRs. \n\n“You're not jumping to different websites or services to do that. It’s all in one place, which is super helpful,” he says.\n\nMinor agrees, and adds, “The amount of oversight I have into every process going on, the transparency that gives me as a product manager to make the next decision has been invaluable.” \n\nIt’s not a stretch to say the transparency enabled by GitLab is reflected in Tangram Vision’s business model. “We’re transparent with our customers and developers,” says Minor. “There are a couple of morsels of code that will be private for a while, but, for the most part, the mission of the company is to make any engineer a computer vision engineer. To do that, a lot of education and openness is required. 
That’s already part of our culture.”\n",[749,793,9],{"slug":6232,"featured":6,"template":686},"making-remote-work-better","content:en-us:blog:making-remote-work-better.yml","Making Remote Work Better","en-us/blog/making-remote-work-better.yml","en-us/blog/making-remote-work-better",{"_path":6238,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6239,"content":6244,"config":6249,"_id":6251,"_type":14,"title":6252,"_source":16,"_file":6253,"_stem":6254,"_extension":19},"/en-us/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say",{"title":6240,"description":6241,"ogTitle":6240,"ogDescription":6241,"noIndex":6,"ogImage":928,"ogUrl":6242,"ogSiteName":670,"ogType":671,"canonicalUrls":6242,"schema":6243},"Making the case for a DevOps platform: What data and customers say","Don't just take our word for why a DevOps platform means better DevOps and faster, safer releases: here's what the latest data shows and how customers have benefitted.","https://about.gitlab.com/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Making the case for a DevOps platform: What data and customers say\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-09-08\",\n      }",{"title":6240,"description":6241,"authors":6245,"heroImage":928,"date":6246,"body":6247,"category":769,"tags":6248},[851],"2021-09-08","\n_Our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nIn the struggle to release safer software faster, development teams are increasingly choosing a DevOps platform to help them get there. 
In our [2021 Global DevSecOps Survey](/developer-survey/) we asked respondents what their DevOps practices included and a \"DevOps platform\" was among the top four choices, right next to CI/CD, test automation, and DevSecOps.\n\nWe're of course bullish on the idea of a DevOps platform, but we're far from alone. Here's a fresh look at how the data – and the customers – support the optimistic trajectory of a DevOps platform.\n\n## DevOps is hot\n\nThe DevOps market was worth $6 billion in 2020, according to Global Industry Analysts, and five-year growth forecasts range from $17 billion to as much as $23 billion, depending on the firm. \n\n**[Watch a [deep dive into GitLab's DevOps Platform](https://www.youtube.com/watch?v=wChaqniv3HI)]**\n\nThis probably doesn't need saying, but one reason the market is so strong is that DevOps works. In late 2020, Forrester Research conducted \"The State of Modern Technology Operations Q4 2020,\" and concluded [\"the DevOps hypothesis is sound\"](https://go.forrester.com/blogs/the-devops-hypothesis-is-sound-introducing-the-2020-state-of-modern-technology-operations-survey/). The report went further to say that companies successfully working in a DevOps/Agile model were able to release faster and thus have higher revenue growth. \n\n## A DevOps platform is the logical next step\n\nBut in order to do DevOps a team needs tools, and too many tools results in a toolchain, which is where things can get very messy quickly. Time consuming handoffs, integrations and maintenance lead to what Forrester calls the \"DevOps tax\" of roughly 10%, meaning teams have to spend that much of their time each month just trying to keep the toolchains running. 
(In [our 2021 Survey](/developer-survey/), the tax was even higher: 20% of survey takers said they spend between 11% and 20% of their time just on toolchain maintenance and integration).\n\n**[Use a DevOps platform to [avoid the DevOps tax](/topics/devops/use-devops-platform-to-avoid-devops-tax/)]**\n\nA DevOps platform with end-to-end visibility and everything in one place eliminates the tax and boosts DevOps performance. Nearly 12% of survey respondents told us that adding a DevOps platform has allowed them to release software faster. Overall, our survey takers said the use of a DevOps platform resulted in better DevOps, improved collaboration, easier automation and more comprehensive visibility/traceability. \n\nOne developer put it succinctly: \"[Using a DevOps platform] means reduced mean time to recovery (MTTR), quicker time to market, reduced lead time for fixes, and fewer change failures.\"\n\nAnd if all of that wasn't enough, a single DevOps platform gives *everyone* in the company the ability to see and participate in the process. In fact, 23% of our survey takers said everyone in their company – not just Dev and Ops – actually uses the DevOps platform. \n\n## DevOps platforms in the real world\n\nHow do teams really take advantage of a DevOps platform?\n\n[BI Worldwide](/customers/bi-worldwide/), a global engagement agency, found the ability to tie all the processes together made a difference. \"One tool for SCM+CI/CD was a big initial win,\" says Adam Dehnel, product architect at BI. \"Now wrapping security scans into that tool as well has already increased our visibility into security vulnerabilities. The integrated Docker registry has also been very helpful for us. 
Issue/Product management features let everyone operate in the same space regardless of role.\"\n\n**[How to [get the most out of your DevOps platform](/topics/devops/seven-tips-to-get-the-most-out-of-your-devops-platform/)]**\n\nLess turned out to be more at [Glympse](/customers/glympse/), a geo-location sharing service provider that consolidated close to 20 different tools into GitLab. \"Development can move much faster when engineers can stay on one page and click buttons to release auditable changes to production and have easy rollbacks; everything is much more streamlined,\" explains Zaq Wiedmann, lead software engineer at Glympse. \"Within one sprint, just 2 weeks, Glympse was able to implement security jobs across all of their repositories using GitLab's CI templates and their pre-existing Docker-based deployment scripts.\"\n\nWant a more detailed look at the role a DevOps platform can play in your organization? Explore our [comprehensive guide to DevOps platforms](/topics/devops-platform/).\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)\n\n- [It's time to build more accessible software. 
A DevOps platform can help](/blog/how-the-devops-platform-makes-building-accessible-software-easier/)\n",[9,681,1829],{"slug":6250,"featured":6,"template":686},"making-the-case-for-a-devops-platform-what-data-and-customers-say","content:en-us:blog:making-the-case-for-a-devops-platform-what-data-and-customers-say.yml","Making The Case For A Devops Platform What Data And Customers Say","en-us/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say.yml","en-us/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say",{"_path":6256,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6257,"content":6263,"config":6268,"_id":6270,"_type":14,"title":6271,"_source":16,"_file":6272,"_stem":6273,"_extension":19},"/en-us/blog/manage-agile-teams-with-microservices",{"title":6258,"description":6259,"ogTitle":6258,"ogDescription":6259,"noIndex":6,"ogImage":6260,"ogUrl":6261,"ogSiteName":670,"ogType":671,"canonicalUrls":6261,"schema":6262},"How to manage Agile teams with microservices","GitLab Groups and Projects can help teams divide work by product or system.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669575/Blog/Hero%20Images/agilemultipleteams.jpg","https://about.gitlab.com/blog/manage-agile-teams-with-microservices","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to manage Agile teams with microservices\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-08-23\",\n      }",{"title":6258,"description":6259,"authors":6264,"heroImage":6260,"date":6265,"body":6266,"category":679,"tags":6267},[702],"2019-08-23","\n\nWe’re getting closer to the 2019 finish line, but there’s still time to jump on\nthe microservices train to accelerate your team’s delivery. 
We’ve written about\nmicroservices in the past, including discussing\n[best practices for microservices implementation](/blog/strategies-microservices-architecture/)\nand [GitLab’s integrated vision for microservices](/blog/microservices-integrated-solution/),\nbut I’m here to share something a little different: How you can use microservices\nto manage your team.\n\nBut first, a recap: Microservices is a collection of independently deployable\nservices that advances a goal, with each application managing a specific function\n_really_ well.\n\n> “The term ‘Microservice Architecture’ has sprung up over the last few years to\ndescribe a particular way of designing software applications as suites of\nindependently deployable services.” –\n[Martin Fowler](https://martinfowler.com/articles/microservices.html)\n\n## GitLab microservices for Agile team management\n\nUsing GitLab [Projects](https://docs.gitlab.com/ee/user/project/) and\n[Groups](https://docs.gitlab.com/ee/user/group/), teams can organize their work\nto increase visibility and collaboration. GitLab supports Agile teams by providing\n[Milestones](https://docs.gitlab.com/ee/user/project/milestones) (or sprints),\n[Issues](https://docs.gitlab.com/ee/user/project/issues/) (or user stories),\n[Weights](https://docs.gitlab.com/ee/user/project/issues/issue_weight.html) (or points and estimation),\nand other common [Agile artifacts](/blog/gitlab-for-agile-software-development/).\n\nHere are a few ways to use groups and projects:\n\n### Organizing your team by system\n\nOne of the more traditional ways to divide work, organizing by system separates\nteams by component and subsystem. For example, the teams that handle mobile iOS,\nmobile Android, and website have different projects, each with their own code\nrepo and [issue tracker](https://docs.gitlab.com/ee/user/project/issues/). 
This\ntype of structure works well with operations-driven organizations, but it’s not\na modern approach, so we recommend one of the following structures instead.\n\n### Organizing your team by product area\n\nDividing work by product is a best practice that drives business value. Using\nGitLab Groups, you can create `Code` and `Teams`. Within `Code`, separate projects\nrepresent various components (e.g. mobile iOS and user accounts), with individual\ncode repositories and sets of [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/).\nOnce you’ve created your projects (and code repos), you can build another group\nfor `Teams`, which includes fullstack product teams (i.e., engineers, PMs, designers),\nenabling parallel milestones and Agile boards. The benefit of organizing work by\nproduct area is that there’s a separation between code repos and work, so that\nevery piece of code in your organization is open to contributions from all teams.\n\n### Organizing your team with a hybrid approach\n\nThis approach combines both product and system organization structures and is\nwell suited for organizations that have cross-platform teams. For example, a mobile\nteam has dedicated iOS and Android engineers rather than full teams for both\nplatforms. In this model, the `Code` group will have individual projects according\nto component, but `Teams` is consolidated so that there’s only a website and mobile\nteam.\n\nWatch this demo and check out its\ncorresponding [example application](https://gitlab.com/trustful-finance-demo) to see groups and projects in action. 🍿\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VR2r1TJCDew\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nDoes your team use microservices for Agile development? 
We’d love to hear your\nthoughts.\n\nCover image by [Martin Sanchez](https://unsplash.com/@martinsanchez?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/MD6E2Sv__iA)\n{: .note}\n",[855,749,9,683],{"slug":6269,"featured":6,"template":686},"manage-agile-teams-with-microservices","content:en-us:blog:manage-agile-teams-with-microservices.yml","Manage Agile Teams With Microservices","en-us/blog/manage-agile-teams-with-microservices.yml","en-us/blog/manage-agile-teams-with-microservices",{"_path":6275,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6276,"content":6282,"config":6287,"_id":6289,"_type":14,"title":6290,"_source":16,"_file":6291,"_stem":6292,"_extension":19},"/en-us/blog/manage-it-alerts-with-gitlab",{"title":6277,"description":6278,"ogTitle":6277,"ogDescription":6278,"noIndex":6,"ogImage":6279,"ogUrl":6280,"ogSiteName":670,"ogType":671,"canonicalUrls":6280,"schema":6281},"How we manage IT Alerts in GitLab","Triaging alerts just got easier with GitLab because you can investigate and remediate outages in a single tool.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681461/Blog/Hero%20Images/manage-it-alerts-in-gitlab.png","https://about.gitlab.com/blog/manage-it-alerts-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we manage IT Alerts in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Waldner\"}],\n        \"datePublished\": \"2020-08-03\",\n      }",{"title":6277,"description":6278,"authors":6283,"heroImage":6279,"date":6284,"body":6285,"category":726,"tags":6286},[5560],"2020-08-03","\n\nIt’s 2 a.m. Monday morning.\n\nYour phone screen lights up and buzzes. Lo and behold, the alert is serious and there is likely a severe incident ongoing with your service.\n\nYou check Slack to see if anyone else is involved. 
Next, you log into your monitoring tool to review the alert and do a quick triage hoping that the cause and solution are straightforward. The next 30 minutes are spent frantically bouncing around between five to six different tools, digging for clues in metrics, events, traces, logs, and release tools, hoping you can correlate a recent deployment to the incident. After another team member finally joins the firefight, you spend precious time getting them up to speed. After that, your boss calls. At this time, an hour has passed, and you are no closer to the root cause.\n\nDoes this situation sound familiar?\n\nThere are so many jobs to be done during an incident: Communicating using multiple channels, facilitating collaboration, documenting findings and the timeline, and assessing metrics, logs, traces, and errors to diagnose problems. This process can be manual, time-consuming, and stressful for incident responders.\n\nWouldn’t it be great if most of this were automated and centralized in one place?\n\nEnter, GitLab alert and incident management\n\nOur vision is to free up more time for incident responders to actually respond to incidents by automating resource management, communications, correlating observability data and metadata, and executing runbooks. 
Since GitLab is a single app for your entire [DevOps](/topics/devops/) lifecycle, the bonus of using GitLab to triage IT alerts and manage incidents is that you are doing so in the same tool you are already using - everything is colocated to help you remediate problems faster.\n\n## What can I do today?\n\nWe are in the midst of building an Operations Command Center where you can investigate, respond to, and remediate IT incidents all in one interface.\n\nAvailable today, GitLab includes the following highlighted functionality:\n\n- Aggregate IT alerts in a single interface (GitLab) via our [generic webhook receiver](https://docs.gitlab.com/ee/operations/incident_management/integrations.html)\n- Triage multiple alerts in a [list view](https://docs.gitlab.com/ee/operations/incident_management/alerts.html)\n- Indicate ownership of critical alerts by [changing the status](https://docs.gitlab.com/ee/operations/incident_management/alerts.html)\n- Delegate responsibility by [assigning alerts](https://docs.gitlab.com/ee/operations/incident_management/alerts.html#assign-an-alert)\n- Promote alerts to incidents by [creating GitLab issues](https://docs.gitlab.com/ee/operations/incident_management/alerts.html#create-an-incident-from-an-alert)\n- [Investigate the metrics](https://docs.gitlab.com/ee/operations/incident_management/alerts.html#metrics-tab) directly in the alert\n\n## What is coming soon?\n\nAlert and incident management tools are the main focus of the [Health group](/handbook/engineering/development/ops/monitor/respond/) within the [Monitor stage](/direction/monitor/). 
In the next few milestones, we anticipate releasing:\n\n- Embedded [logs](https://gitlab.com/gitlab-org/gitlab/-/issues/231395) for GitLab Alerts\n- Linked [runbooks in alerts](https://gitlab.com/groups/gitlab-org/-/epics/1436)\n- A custom [integration builder](https://gitlab.com/gitlab-org/gitlab/-/issues/217766) to integrate any alerting source with GitLab\n- An [incident dashboard](https://gitlab.com/gitlab-org/gitlab/-/issues/219542) to manage active outages\n\n## We want to hear from you!\nAs per usual, we, at GitLab, listen closely to our community and we like to give you direct access to the ideas we are considering for our product. If you want to contribute to building [Incident Management](https://gitlab.com/groups/gitlab-org/-/epics/349) tools, please check out the linked epic to see what we have in the near-term. We love your feedback and we would love to receive your merge requests even more.\n\n## Read more about our monitoring tools:\n\n- [Why we scoped down to build up error tracking](/blog/iteration-on-error-tracking/)\n- [How application performance metrics helps developers](/blog/working-with-performance-metrics/)\n- [Understand incident management with GitLab](/blog/incident-management-with-gitlab/)\n",[9,916],{"slug":6288,"featured":6,"template":686},"manage-it-alerts-with-gitlab","content:en-us:blog:manage-it-alerts-with-gitlab.yml","Manage It Alerts With Gitlab","en-us/blog/manage-it-alerts-with-gitlab.yml","en-us/blog/manage-it-alerts-with-gitlab",{"_path":6294,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6295,"content":6301,"config":6306,"_id":6308,"_type":14,"title":6309,"_source":16,"_file":6310,"_stem":6311,"_extension":19},"/en-us/blog/manager-of-frances-fr-domain-selects-gitlab",{"title":6296,"description":6297,"ogTitle":6296,"ogDescription":6297,"noIndex":6,"ogImage":6298,"ogUrl":6299,"ogSiteName":670,"ogType":671,"canonicalUrls":6299,"schema":6300},"France's .fr domain manager selects GitLab for security","Afnic looks to The 
One DevOps Platform to modernize its software development with automation, security and compliance, and support for multi-cloud environments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667869/Blog/Hero%20Images/afniclogo.png","https://about.gitlab.com/blog/manager-of-frances-fr-domain-selects-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Manager of France's .fr domain selects GitLab for its DevSecOps capabilities\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-05-19\",\n      }",{"title":6302,"description":6297,"authors":6303,"heroImage":6298,"date":3971,"body":6304,"category":769,"tags":6305},"Manager of France's .fr domain selects GitLab for its DevSecOps capabilities",[745],"Association Française pour le Nommage Internet en Coopération ([Afnic](https://www.afnic.fr/en/)) is a longstanding nonprofit in France that manages .fr domain names. Chosen 20 years ago by the French State to operate the .fr country code top-level domain, Afnic’s motto is “reliability first.” Afnic uses GitLab, The One DevOps Platform, to help sustain that motto through modernization of its software development environment.\n\nAfnic’s mission as the French National Top Level Domain Registry is to bring together public authorities, Internet users, and domain name professionals to build a secure and stable Internet, open to innovation and in which the French Internet community plays a leading role. Outages of such a digital service could prevent the provisioning of other services that rely on it and could thus have an impact on key economic and societal activities.\n\nAfnic started using GitLab about four years ago to build and secure the brand-new version of its Shared Registry System (SRS). 
The SRS is a platform that manages the domain names from the subscription of a domain name to the publication in the DNS database and all the updates during its life, including contacts, server names, and DNSSEC keys, according to Richard Coffre, Afnic’s principal product manager.\n\nSince the project began, all the technologies have changed. Previously, Afnic’s team was mainly using Java and Perl and now they use [Kubernetes](/solutions/kubernetes/), Angular, the latest version of Java, and Docker, among others. Security is paramount, and the team is using private clouds. That means Afnic has its own data centers in France and in colocation facilities all over the world.\n\n## Modernizing software development with automation and integration\n\nAfnic selected GitLab to automate and integrate processes during the deployment process. Previously, the majority of things were done manually and now Afnic’s team wants to follow [DevSecOps philosophy and governance](/topics/devsecops/). They wanted one DevOps platform with state-of-the-art [CI/CD](/topics/ci-cd/) capabilities, the ability to quickly onboard new developers, and features to improve compliance and monitoring functionality.\n\nNow, GitLab is one of the core components of Afnic’s systems.\n\nThe company’s use of GitLab expanded as they deployed new versions of Java and Docker and other technologies. “We wanted to take a big step to align our technology with the state of the market,” Coffre says, and after surveying the development team, the choice was GitLab.\n\nThe team is integrating GitLab with Jira, which is providing a lot of value, he adds.\n\nNow, in addition to developers, Afnic’s database administrators and network administrators use GitLab. The team is using Docker for images and Ansible. 
Jira is used for ticketing issues and is linked to GitLab and Confluence as a wiki to create the documentation.\n\n## What GitLab brings to the table\n\nThe goal for Afnic is to increase automation and to have everything in the same place and for anyone to be able to get at the proper version anytime. “That's the strength of GitLab,\" Coffre says. “That's also why we chose it because it's one of the leaders. Like many modern source code management systems, GitLab allows our developers to concurrently create source code. But it does it easily, giving us the possibility to do it safely, remembering our motto.\"\n\nPreviously Afnic used only open source tools that they had to customize, which Coffre says was not efficient on a daily basis. To manage source code properly, the team syncs it to GitLab. The strong focus on community contributions “is a guarantee that its features match the developers’ needs, especially regarding CI/CD,” he adds. \n\nWhen new developers join Afnic, it is very easy to onboard them to GitLab, he says. Another benefit is the cost savings because developers don’t lose source code. There is a time-saving metric, too, because if there is an issue in GitLab, it just requires someone to patch it. \n\nNow developers can focus on higher-value strategic tasks like security and vulnerability compliance, and not manual tests and delays, etc. That frees up developers to focus on their job managing DNS databases because the GitLab platform manages the software development lifecycle end-to-end. Coffre says, “GitLab will provide the foundational platform for all Afnic’s software products moving forward. 
We have experienced great benefits so far and we are excited to expand our use of this platform into the future”.",[9,875,976,977],{"slug":6307,"featured":6,"template":686},"manager-of-frances-fr-domain-selects-gitlab","content:en-us:blog:manager-of-frances-fr-domain-selects-gitlab.yml","Manager Of Frances Fr Domain Selects Gitlab","en-us/blog/manager-of-frances-fr-domain-selects-gitlab.yml","en-us/blog/manager-of-frances-fr-domain-selects-gitlab",{"_path":6313,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6314,"content":6319,"config":6324,"_id":6326,"_type":14,"title":6327,"_source":16,"_file":6328,"_stem":6329,"_extension":19},"/en-us/blog/managers-more-optimistic-than-developers",{"title":6315,"description":6316,"ogTitle":6315,"ogDescription":6316,"noIndex":6,"ogImage":5207,"ogUrl":6317,"ogSiteName":670,"ogType":671,"canonicalUrls":6317,"schema":6318},"How do developers and managers feel about their jobs?","How do you assess job satisfaction? Here's a look inside the findings and methods of our Global Developer Report.","https://about.gitlab.com/blog/managers-more-optimistic-than-developers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How do developers and managers feel about their jobs?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        \"datePublished\": \"2018-03-20\",\n      }",{"title":6315,"description":6316,"authors":6320,"heroImage":5207,"date":6321,"body":6322,"category":679,"tags":6323},[4620],"2018-03-20","\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nOne of the goals of our [2018 developer survey](/developer-survey/previous/2018/) was to establish a benchmark for how satisfied software professionals generally are in their jobs. 
Using the detailed demographic information we captured at the beginning, we were able to sort and compare the opinions of different groups within our sample of over 5,000 respondents. One of our key findings was that, for all their differences, developers and managers agree with each other on a lot of things, but managers tend to have a slightly rosier outlook when their views diverge.\n\n\u003C!-- more -->\n\n### How we determined overall satisfaction\n\nSurveys are tricky, and humans are trickier, so we had to brainstorm a bit on what exactly we were interested in learning, and [how](http://www.pewresearch.org/methodology/u-s-survey-research/questionnaire-design/) we could coax out this information without introducing our own biases. We used a series of [likert scales](https://www.surveymonkey.com/mp/likert-scale/) to get at these groups’ perceptions of their autonomy, team dynamics, support, and other fuzzy things that we think can really drive happiness in a role (we also asked about details on tooling and workflow [later on](/developer-survey/previous/2018/) in the survey). We’ve [published before](https://medium.com/@gitlab/invite-your-engineers-to-talk-business-heres-why-485ce02c4d18) on what happens when your business and engineering teams are out of sync, and we wanted to ask about other symptoms of that same problem. 
Here are some of the questions, along with the raw data that we used to compare satisfaction between developers and management.\n\n\u003Cstyle type=\"text/css\">\n.tg  {border-collapse:collapse;border-spacing:0;}\n.tg td{font-family:Arial, sans-serif;font-size:14px;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}\n.tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;}\n.tg .tg-9hbo{font-weight:bold;vertical-align:top}\n.tg .tg-yw4l{vertical-align:top}\n\u003C/style>\n\u003Ctable class=\"tg\">\n  \u003Ctr>\n    \u003Cth class=\"tg-9hbo\">Managers\u003C/th>\n    \u003Cth class=\"tg-9hbo\">%\u003C/th>\n    \u003Cth class=\"tg-9hbo\">Developers\u003C/th>\n    \u003Cth class=\"tg-9hbo\">%\u003C/th>\n  \u003C/tr>\n  \u003Ctr>\n    \u003Ctd class=\"tg-yw4l\">My team is set up to succeed\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">84\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">I feel set up to succeed\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">75\u003C/td>\n  \u003C/tr>\n  \u003Ctr>\n    \u003Ctd class=\"tg-yw4l\">My team is given realistic deadlines\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">68\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">I’m given realistic deadlines\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">65\u003C/td>\n  \u003C/tr>\n  \u003Ctr>\n    \u003Ctd class=\"tg-yw4l\">Project expectations are set up front\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">60\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">Project expectations are set up front\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">50\u003C/td>\n  \u003C/tr>\n  \u003Ctr>\n    \u003Ctd class=\"tg-yw4l\">My team rarely needs to sacrifice quality to meet a deadline\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">53\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">I rarely need to sacrifice quality to meet a deadline\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">50\u003C/td>\n  \u003C/tr>\n  \u003Ctr>\n    
\u003Ctd class=\"tg-yw4l\">My team is able to make decisions about their work\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">91\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">It’s important to me to be able to make decisions about my work\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">96\u003C/td>\n  \u003C/tr>\n  \u003Ctr>\n    \u003Ctd class=\"tg-yw4l\">My team has the authority to make decisions\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">88\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">I have the authority to make decisions about my work\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">83\u003C/td>\n  \u003C/tr>\n  \u003Ctr>\n    \u003Ctd class=\"tg-yw4l\">My team's ideas and opinions are valued\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">93\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">My ideas and opinions are valued\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">84\u003C/td>\n  \u003C/tr>\n  \u003Ctr>\n    \u003Ctd class=\"tg-yw4l\">My team has access to the best development tools\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">81\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">I have access to the best development tools\u003C/td>\n    \u003Ctd class=\"tg-yw4l\">74\u003C/td>\n  \u003C/tr>\n\u003C/table>\n\n\n### Tying individual attitudes to culture\n\nWhat are some other things that might contribute to a frustrating or dysfunctional culture? To try to hint at big, sometimes implicit things like psychological safety, bureaucracy, and whether their team is more democratic or autocratic, we had to come up with a list of concrete indicators, which you can see below:\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/biggest-challenges-chart.png\" alt=\"biggest challenges to adopting new tools and practices\" class= \"shadow\" style=\"width: 700px;\"/>\u003C/center>\n\nWhen we asked about the biggest challenges teams face when adopting new processes or tools, the top three responses were replacing ingrained practices, resistance to change, and cross-team communication. 
Developers and managers are in agreement here almost exactly, although developers are slightly more likely to name resistance to change (51 percent) than managers (46 percent).\n\nWe saw this echoed in other ways, with the greatest number of developers (42 percent) naming unclear direction as their top challenge to getting work done. Relatedly, just 57 percent of developers say they have visibility into what their team members in operations, security, and product are working on. Managers feel slightly better off in this regard, with 69 percent reporting that they have visibility (we also found some differences in how remote versus in-office teams view the issue, which you can read more about [here](/developer-survey/previous/2018/)).\n\n### What we want to learn next\n\nCommunication, and structures or habits that might enable or impede it, is a theme that we’re interested in learning more about. It’s a predictable problem with no easy fix, so we ran a Twitter poll to get some input on how teams have wrestled with communication issues in the past.\n\nOne suggestion for how to overcome the cultural barriers to adopting DevOps is to embed team members to improve cross-team collaboration, but that doesn’t always seem doable because it’s an organizational change, requiring buy-in from many more people than just the developers involved. It wasn’t surprising, then, that this option was chosen the least. Regular social activities and working sessions seem like much cheaper options, but were barely more popular. The greatest number of people simply chose our equivalent of ¯\\\\\\_(ツ)_/¯.\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">We heard from developers that miscommunication is a major challenge to getting work done \u003Ca href=\"https://t.co/Cvqwnf5tVH\">https://t.co/Cvqwnf5tVH\u003C/a>. 
\u003Cbr>\u003Cbr>What&#39;s the best way to improve communication issues between teams in your engineering organization?\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/973648916536205312?ref_src=twsrc%5Etfw\">March 13, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nWe heard from a few devs about solutions that didn’t make our short list, and they’re rarely about just talking to each other more. Tellingly, the responses we got were much more likely tying communication to big, pervasive cultural things, like compensation incentives and respect for others’ work.\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-conversation=\"none\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Mutual respect and interest in the work of others. Especially between different but collaborating professions like design and development but also within a group of the same type.\u003C/p>&mdash; ᴄɪᴛɪᴢᴇɴ ᴅʀᴀɪɴ (@Citizen_Drain) \u003Ca href=\"https://twitter.com/Citizen_Drain/status/973671170808696832?ref_src=twsrc%5Etfw\">March 13, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-conversation=\"none\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Writing documentation, and planning. 
Old skool and works.\u003C/p>&mdash; Peter Bowyer (@peterbowyer) \u003Ca href=\"https://twitter.com/peterbowyer/status/973650507930664966?ref_src=twsrc%5Etfw\">March 13, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nWhen we [asked](https://twitter.com/gitlab/status/974023284953006080) Netflix engineer Randall Koutnik for more details on his tweet (below) he [wrote a post](https://rkoutnik.com/2018/03/17/incentivize-teams-not-people.html) with examples of how dev teams can be undermined by policies tying financial incentives and promotion criteria to individual performance goals, rather than company performance.\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-conversation=\"none\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Too many companies financially incentivize against teamwork. If my bonus is determined by me hitting my objectives, then it&#39;s counterproductive to help others instead of focusing in on my own work.\u003C/p>&mdash; Randall Koutnik (@rkoutnik) \u003Ca href=\"https://twitter.com/rkoutnik/status/973689841870229507?ref_src=twsrc%5Etfw\">March 13, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nWhy is this predictable problem so stubborn? What has your team tried? 
Tweet us [@gitlab](https://twitter.com/gitlab).\n\nPhoto by [Dylan Gillis](https://unsplash.com/photos/KdeqA3aTnBY) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[681,813,9],{"slug":6325,"featured":6,"template":686},"managers-more-optimistic-than-developers","content:en-us:blog:managers-more-optimistic-than-developers.yml","Managers More Optimistic Than Developers","en-us/blog/managers-more-optimistic-than-developers.yml","en-us/blog/managers-more-optimistic-than-developers",{"_path":6331,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6332,"content":6337,"config":6343,"_id":6345,"_type":14,"title":6346,"_source":16,"_file":6347,"_stem":6348,"_extension":19},"/en-us/blog/managing-multiple-environments-with-terraform-and-gitlab-ci",{"title":6333,"description":6334,"ogTitle":6333,"ogDescription":6334,"noIndex":6,"ogImage":990,"ogUrl":6335,"ogSiteName":670,"ogType":671,"canonicalUrls":6335,"schema":6336},"Managing multiple environments with Terraform and GitLab CI","This tutorial shows how to set up and manage three different environments in one project using GitLab CI and Terraform.","https://about.gitlab.com/blog/managing-multiple-environments-with-terraform-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Managing multiple environments with Terraform and GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sophia Manicor\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2023-06-14\",\n      }",{"title":6333,"description":6334,"authors":6338,"heroImage":990,"date":6340,"body":6341,"category":875,"tags":6342},[6339,5941],"Sophia Manicor","2023-06-14","\n\nUsing multiple environments ensures that your infrastructure as code (IaC) is rigorously tested before it is deployed. 
This tutorial will show a setup of how to manage **three different environments in one project** using GitLab CI and Terraform.\n\n## Prerequisites\n- Working knowledge of [GitLab CI/CD](https://docs.gitlab.com/ee/ci/introduction/index.html#continuous-integration)\n- An AWS / GCP account (where you will deploy to)\n- Working knowledge of Terraform\n- 5 minutes\n\n## Multiple environments\nIn this tutorial, we have three environments set up: dev, staging, and production.\n- Dev: This should be where all the experimental changes go. This environment is intended to develop new features and/or test out new changes.\n- Staging: After you have confirmed your changes in dev, this environment should have parity with the production environment.\n- Production: This environment has the latest versions of infrastructure and applications are live.\n\n## File structure\nFor each environment we set up a corresponding folder at the root level: folders are named dev, staging, and production respectively. Each folder stores all the Terraform infrastructure configuration for the corresponding environment. Within each of these folders, we created a CI file for that environment. \n\n## .gitlab-ci.yml\n\n### Environment-specific .gitlab-ci.yml\nThe file below is for the dev environment and is in the dev folder. Note that there is a rule with each job that only allows the jobs to run when a file in the dev folder is changed. There is a corresponding file in the staging and production folders that has the same rules to only allow jobs when those specific folders are changed. To keep each CI file running the same jobs we have made use of a helper file. 
\n\n[Environment-specific GitLab CI](https://gitlab.com/demos/infrastructure/terraform-multi-env/-/blob/main/dev/.gitlab-ci.yml)\n\n```yaml\ninclude:\n  - 'helper.yml'\n\nvariables:\n  TF_ROOT: ./dev  # The relative path to the root directory of the Terraform project\n  TF_STATE_NAME: default      # The name of the state file used by the GitLab-managed Terraform state backend\n  SECURE_ANALYZERS_PREFIX: \"$CI_TEMPLATE_REGISTRY_HOST/security-products\"\n  SAST_IMAGE_SUFFIX: \"\"\n  SAST_EXCLUDED_PATHS: \"spec, test, tests, tmp\"\n  PLAN: plan.cache\n  PLAN_JSON: plan.json\n\n\ncache:\n  key: \"${TF_ROOT}\"\n  paths:\n    - ${TF_ROOT}/.terraform/\n\nfmt-dev:\n  extends: .fmt\n  rules:\n      - changes:\n          - dev/**/*\n\nvalidate-dev:\n  extends: .validate\n  rules:\n      - changes:\n          - dev/**/*\n\nbuild-dev:\n  extends: .build\n  rules:\n      - changes:\n          - dev/**/*\n\nkics-iac-sast-dev:\n  extends: .kics-iac-sast\n  rules:\n      - changes:\n          - dev/**/*\n\ndeploy-dev:\n  extends: .deploy\n  rules:\n      - changes:\n          - dev/**/*\n\ndestroy-dev:\n  extends: .destroy\n  rules:\n      - changes:\n          - dev/**/*\n\n```\n\n### helper.yml\nThis helper file was created at the root level so that it could be referenced by all of the environment-specific files. The [helper.yml](https://gitlab.com/demos/infrastructure/terraform-multi-env/-/blob/main/helper.yml) is where all the heavy lifting is happening. This will make sure that all the jobs throughout the environment-specific file's configuration stays up to date and consistent. 
In the environment-specific files we 'included' the helper file and extended the jobs outlined below.\n\n```yaml\n\n.fmt:\n  stage: validate\n  script:\n    - cd \"${TF_ROOT}\"\n    - gitlab-terraform fmt\n  allow_failure: true\n\n\n.validate:\n  stage: validate\n  script:\n    - cd \"${TF_ROOT}\"\n    - gitlab-terraform validate\n\n\n.build:\n  stage: build\n  before_script:\n    - apk --no-cache add jq\n    - alias convert_report=\"jq -r '([.resource_changes[]?.change.actions?]|flatten)|{\\\"create\\\":(map(select(.==\\\"create\\\"))|length),\\\"update\\\":(map(select(.==\\\"update\\\"))|length),\\\"delete\\\":(map(select(.==\\\"delete\\\"))|length)}'\"\n  script:\n    - cd \"${TF_ROOT}\"\n    - gitlab-terraform plan -out=$PLAN\n    - gitlab-terraform plan-json | convert_report > $PLAN_JSON\n  resource_group: ${TF_STATE_NAME}\n  artifacts:\n    paths:\n      - ${TF_ROOT}/plan.cache\n    reports:\n      terraform: ${TF_ROOT}/$PLAN_JSON\n\n.kics-iac-sast:\n  stage: test\n  artifacts:\n    reports:\n      sast: gl-sast-report.json\n  image:\n    name: \"$SAST_ANALYZER_IMAGE\"\n  variables:\n    SEARCH_MAX_DEPTH: 4\n    SAST_ANALYZER_IMAGE_TAG: 3\n    SAST_ANALYZER_IMAGE: \"$SECURE_ANALYZERS_PREFIX/kics:$SAST_ANALYZER_IMAGE_TAG$SAST_IMAGE_SUFFIX\"\n  allow_failure: true\n  script:\n    - /analyzer run\n\n\n.deploy:\n  stage: deploy\n  script:\n    - cd \"${TF_ROOT}\"\n    - gitlab-terraform apply\n  resource_group: ${TF_STATE_NAME}\n  when: manual\n  rules:\n      - changes:\n          - ${TF_ENVIRONMENT}/**/*\n\n.destroy:\n  stage: cleanup\n  script:\n    - cd \"${TF_ROOT}\"\n    - gitlab-terraform destroy\n  resource_group: ${TF_STATE_NAME}\n  when: manual\n\n```\n\n### Root-level .gitlab-ci.yml\n[Root-level GitLab CI](https://gitlab.com/demos/infrastructure/terraform-multi-env/-/blob/main/.gitlab-ci.yml)\n\nThe file that brings everything above together is the root-level CI file. This will be what the pipeline initially references when run. 
The [root-level GitLab CI](https://gitlab.com/demos/infrastructure/terraform-multi-env/-/blob/main/.gitlab-ci.yml) is where all of the stages and container images are defined. Note that they are inheriting `.gitlab-ci.yml` from each of the individual folders themselves.\n\n```yaml\ninclude:\n  - 'dev/.gitlab-ci.yml'\n  - 'staging/.gitlab-ci.yml'\n  - 'production/.gitlab-ci.yml'\n  \nimage:\n  name: \"$CI_TEMPLATE_REGISTRY_HOST/gitlab-org/terraform-images/releases/1.1:v0.43.0\"\n\nstages:          \n  - validate\n  - build\n  - test\n  - deploy\n  - cleanup\n\nvariables:\n  # If not using GitLab's HTTP backend, remove this line and specify TF_HTTP_* variables\n  TF_STATE_NAME: default\n  TF_CACHE_KEY: default\n\n```\n\n## Merge request + promotion through environments\nWith the project set up and GitLab CI’s triggering only based off changes to the individual environment folders, you can now safely promote changes using merge requests. When you want to make a change:\n1. First create a merge request in the dev environment with your *.tf files.\n2. Review the [Terraform integration in merge requests](https://docs.gitlab.com/ee/user/infrastructure/iac/mr_integration.html) to see X changes, X to Add, and X to Remove.\n3. If your changes are as expected, request your team members to review the changes and Terraform code.\n4. Apply the changes to your dev environment and merge in the merge request.\n5. If everything worked as intended, then make the same merge up into the staging environment.\n6. If the staging environment remains stable, make a merge request up into the production environment.\n\n\n## Results\nVoila, and there you have it! 
**A single project to manage three different infrastructure environments** in a safe way to ensure that your changes to production are tested, reviewed, and approved by the rest of your team members.\n\n",[9,976,2243,875],{"slug":6344,"featured":6,"template":686},"managing-multiple-environments-with-terraform-and-gitlab-ci","content:en-us:blog:managing-multiple-environments-with-terraform-and-gitlab-ci.yml","Managing Multiple Environments With Terraform And Gitlab Ci","en-us/blog/managing-multiple-environments-with-terraform-and-gitlab-ci.yml","en-us/blog/managing-multiple-environments-with-terraform-and-gitlab-ci",{"_path":6350,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6351,"content":6356,"config":6361,"_id":6363,"_type":14,"title":6364,"_source":16,"_file":6365,"_stem":6366,"_extension":19},"/en-us/blog/many-meanings-multicloud",{"title":6352,"description":6353,"ogTitle":6352,"ogDescription":6353,"noIndex":6,"ogImage":1801,"ogUrl":6354,"ogSiteName":670,"ogType":671,"canonicalUrls":6354,"schema":6355},"Understand the many meanings of multicloud","In our 2020 DevSecOps Survey we uncovered a number of different definitions of 'multicloud.' Here's how to make sense of it all.","https://about.gitlab.com/blog/many-meanings-multicloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Understand the many meanings of multicloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-06-30\",\n      }",{"title":6352,"description":6353,"authors":6357,"heroImage":1801,"date":6358,"body":6359,"category":679,"tags":6360},[851],"2020-06-30","\n\nWhat does multicloud mean? We've heard – and used – the term '[multicloud](/topics/multicloud/)' for a while now but, like most industry terms, it can be defined differently by different groups. 
So in our just released [2020 Global DevSecOps Survey](/developer-survey/) we asked 3652 people from 21 countries across 19 job categories what multicloud actually means to them.\n\nThe majority of respondents (36%) said multicloud means the ability to deploy some applications on Azure, some on AWS, and some on Google. Almost 35% said they thought it meant deploying applications across multiple cloud providers with different components on different clouds. And finally almost 29% said it meant being able to move an app from one cloud provider to another.\n\nIt got even more interesting when we asked them to describe how multicloud is used in their organizations. A clear majority aren't doing \"multicloud\" yet - their teams use one cloud provider only, or none at all. (For context, over 18% of survey respondents said their organizations are not currently using any cloud provider.)\n\n_We don't use multi cloud here. Not yet._\n\n_We will deploy to the cloud the customer requires. But the whole application sits on one cloud._\n\n_We don't, on purpose, because we do not subscribe to the vendor lock-in argument and therefore multi-cloud would require more resources than we feel it is worth._\n\nOthers took the term multicloud literally, saying their teams use several different platforms.\n\n_We have moved workloads from one provider to another to address performance issues._\n\n_Multiple clouds used for different purposes and some on prem hyperconverged thrown into the mix._\n\n_We use different cloud providers for different projects._\n\n_We use Digital Ocean + GCP + AWS_\n\nAnd some took multicloud further.\n\n_We're switching between providers while testing new functionalities for our innovative apps._\n\n_We built our own PaaS based on Kubernetes. 
This system could be deployed/provisioned on any K8S ready public cloud provider or to any other compatible hosting system._\n\n_For us \"multicloud\" means simultaneously using multiple cloud providers, not just ability to migrate apps between them._\n\n_We avoid vendor lock-in by being able to run on any cloud seamlessly_\n\n## Defining the stages of multicloud\n\nSo is there a single definition of multicloud? The answer is yes, but... Our CEO [Sid Sijbrandij](/company/team/#sytses) argues that multicloud isn't something static but rather a series of staged workflows that mean different things to organizations depending on where they are in their DevOps journey. His [maturity model](https://medium.com/gitlab-magazine/multi-cloud-maturity-model-2de185c01dd7) consists of seven stages, starting with everything on a single cloud and ending with true data portability in multiple clouds.\n\n[William Chia](/company/team/#williamchia), senior product marketing manager for cloud native and GitOps, suggests starting by asking the question \"Where are your workloads running?\" For many organizations the answer will be one workload is running on one cloud and another workload might be running in a different cloud – the team is using multiple clouds. This is an early stage of maturity on the journey to multicloud adoption. \n\nA team might want to be able to deploy the same workload to different clouds, a step that would help avoid vendor lock-in, provide backup coverage in case of failures, and perhaps offer some leverage that might help with costs, William explains. But there's a serious trade-off in that step because the engineering resources required to create a platform to deploy to multiple clouds are significant.\n\n\"The Utopian goal of getting to a place where you can have the same workloads on multiple clouds easily is not necessarily desirable for everyone or even the majority of people,\" William says. 
\"It costs a lot to do that and you need to have the engineering staff who understand both clouds. There's a high cost to multicloud.\"\n\nThe final stages of multicloud, which William says today represents just a small fraction of companies today, is where the same application is deployed to different clouds and workloads as well as data for can be dynamically shifted between multiple clouds. \n\nSo the often-used term \"multicloud\" does legitimately have evolving definitions and that will likely continue as DevOps matures. One step on the multicloud journey that unlocks powerful benefits with little overhead is the jump to [work***flow*** portability](/topics/multicloud/). While most companies aren't close to the highest reaches of multicloud maturity, almost any company can get started down the path. It's clear that the best implementations take into consideration the tradeoffs and choose right amount of multicloud for the task at hand.\n\n**Read more about multicloud:**\n\n[Leverage GitLab CI/CD to get the most out of multicloud](/blog/gitlab-ci-cd-is-for-multi-cloud/)\n\n[The role cloud-agnostic DevOps can play](/blog/ci-cd-the-ticket-to-multicloud/)\n\n[Seven best practices for multicloud security](/blog/multi-cloud-security/)\n",[771,681,9],{"slug":6362,"featured":6,"template":686},"many-meanings-multicloud","content:en-us:blog:many-meanings-multicloud.yml","Many Meanings Multicloud","en-us/blog/many-meanings-multicloud.yml","en-us/blog/many-meanings-multicloud",{"_path":6368,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6369,"content":6374,"config":6380,"_id":6382,"_type":14,"title":6383,"_source":16,"_file":6384,"_stem":6385,"_extension":19},"/en-us/blog/merge-request-changes-summary-ai",{"title":6370,"description":6371,"ogTitle":6370,"ogDescription":6371,"noIndex":6,"ogImage":2857,"ogUrl":6372,"ogSiteName":670,"ogType":671,"canonicalUrls":6372,"schema":6373},"ML experiment: Summarize merge request changes","Learn how GitLab is experimenting with 
ML-powered merge request changes summarization in this sixth installment of our ongoing AI/ML in DevSecOps series.","https://about.gitlab.com/blog/merge-request-changes-summary-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Summarize merge request changes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2023-04-20\",\n      }",{"title":6370,"description":6371,"authors":6375,"heroImage":2857,"date":6377,"body":6378,"category":1178,"tags":6379},[6376],"Kai Armstrong","2023-04-20","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i> \n\nMerge requests are the central point of collaboration for code changes in GitLab. They often contain a variety of changes across many files and services within a project. Often, merge requests communicate the intent of the change as it relates to an issue being resolved, but they might not describe what was changed to achieve that. As review cycles progress, the current state of the merge request can become out of sync with the realities of the proposed changes and keeping people informed. 
We believe that we can leverage AI and large language models (LLMs) to help provide relevant summaries of a merge request and its proposed changes, so reviewers and authors can spend more time discussing changes and less time keeping descriptions updated.\n\nIn a rapid prototype, [Kerri Miller](https://gitlab.com/kerrizor), Staff Backend Engineer for our [Code Review Group](/handbook/product/categories/#code-review-group), used AI to summarize the merge request changes directly within the [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/). She developed a `/summarize_diff` quick action to post a summary of changes into a comment:\n\n![Merge request summary via AI](https://about.gitlab.com/images/blogimages/merge-request-changes-summary-ai.gif){: .shadow}\n\n## Iterating on AI/ML features\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. We're starting with providing complete summaries of what changes a merge request makes, and are beginning to look at more targeted flows to enhance the review cycle experience. Current areas we're investigating include providing:\n\n- Summaries of what's changed between each review cycle in a merge request.\n- Summaries of review feedback to merge request authors.\n\nThis experiment is just the start of the ways we're looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI-assisted features. We'll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? 
[Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[9,1180,916,1181],{"slug":6381,"featured":6,"template":686},"merge-request-changes-summary-ai","content:en-us:blog:merge-request-changes-summary-ai.yml","Merge Request Changes Summary Ai","en-us/blog/merge-request-changes-summary-ai.yml","en-us/blog/merge-request-changes-summary-ai",{"_path":6387,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6388,"content":6394,"config":6400,"_id":6402,"_type":14,"title":6403,"_source":16,"_file":6404,"_stem":6405,"_extension":19},"/en-us/blog/merge-trains-explained",{"title":6389,"description":6390,"ogTitle":6389,"ogDescription":6390,"noIndex":6,"ogImage":6391,"ogUrl":6392,"ogSiteName":670,"ogType":671,"canonicalUrls":6392,"schema":6393},"How to use merge train pipelines with GitLab","Read here an introduction on what merge trains are, how to use them and how to incorporate them to your GitLab project.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667210/Blog/Hero%20Images/merge-train-explained-banner.jpg","https://about.gitlab.com/blog/merge-trains-explained","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use merge train pipelines with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        
\"datePublished\": \"2020-12-14\",\n      }",{"title":6389,"description":6390,"authors":6395,"heroImage":6391,"date":6397,"body":6398,"category":791,"tags":6399},[6396],"Veethika Mishra","2020-12-14","This blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-01-20.\n{: .alert .alert-info .note}\n\n[Merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) is a powerful GitLab feature that empowers users to harness the potential of [pipelines for merge results](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) to the fullest and also automatically merge a series of (queued) merge requests (MRs) without breaking the target branch. However, due to the structural complexity of the concept, users are often unable to use it effectively for their projects and play it safe by restricting their usage to MRs that pose minimum or no conflict with the target branch.\n\nAs a [senior product designer for Continuous Integration (CI)](/company/team/#veethikaa), I often deconstruct certain concepts and logic for features related to CI so that I have a strong foundation of understanding when making design proposals. Recently, I had a chance to hold a discussion around a very interesting feature - merge trains — with the team. This post unpacks the concept of merge trains by explaining the difference between merge trains, pipelines for MRs, and pipelines for merge results.\n\n## Pipelines for merge requests\n\nGenerally, when a new merge request is created, a pipeline runs to check if the new changes are eligible to be merged to the target branch. This is called the pipeline for merge requests (MRs). A good practice is to only keep the necessary jobs for validating the changes at this step, so the pipeline doesn’t take a long time to complete and CI minutes are not overused. 
GitLab allows users to [configure the pipeline for MRs](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) by adding `rules:if: $CI_MERGE_REQUEST_IID` to the jobs they wish to run for MRs.\n\n![Pipeline for merge request](https://about.gitlab.com/images/blogimages/merge-train-explained-pipeline-for-merge-requests.jpg)\n\n### Pipelines for merge results\n\nMerge request pipelines verify the branch in isolation. The target branch may change several times during the lifetime of the MR, and these changes are not taken into consideration. In the time during which the pipeline for the MR runs (and succeeds), if the target branch progresses in the background and a user merges the changes to the target branch, they might eventually end up with a broken target.\n\nWhen a [pipeline for merge results](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) runs, GitLab CI performs a _pretend_ merge against the updated target branch by creating a commit on an internal ref from the source branch, and then runs a pipeline against it. This pipeline validates the result prior to merging, therefore increasing the chances of keeping the target branch green.\n\n![Pipeline for merge results](https://about.gitlab.com/images/blogimages/merge-train-explained-pipeline-for-merge-results.jpg)\n\nWe should keep in mind that this pipeline does not run automatically with every update to the target branch. To learn more about this feature in detail and understand the process of enabling it in your GitLab instance, you can refer to the [official documentation on merge results](https://docs.gitlab.com/ee/ci/pipelines/merged_results_pipelines.html).\n\nHowever, if a long time has passed since the last successful pipeline ran, by the time the MR is ready to be merged, the target branch may have already changed and advanced. If we go ahead and merge your MR without re-running the pipeline for MRs, we could end up with a broken target branch. 
Merge trains can prevent this from happening.\n\n### About merge trains\n\nPipeline for merge results is an extremely useful feature in itself, but tracking the right slot to merge the feature branch into the target and remembering to run the pipeline manually before doing so is a lot to expect from a developer buried in tasks that involve deep logical thinking.\n\nTo tackle this complexity in workflow, GitLab introduced [the merge trains feature](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) in [GitLab Premium 12.0](/releases/2019/06/22/gitlab-12-0-released/#sequential-merge-trains). Merge trains allow users to capitalize on the capabilities of pipelines for merge results to automate the process of merging to the target branch with minimum chances of breaking it.\n\nWith merge trains enabled, a merge request can be added to the train, which takes care of it until merged.\nA merge train can be imagined as a queue of MRs that is automatically managed for you.\n\n#### How do merge trains work?\n\nWhen users queue up their MRs in a merge train, GitLab performs a pretend merge for each source branch on top of the previous branch in the queue, where the first branch on the train is merged against the target branch.\nBy creating a temporary commit for each of these merges, GitLab can run merged result pipelines.\nThe first MR in the queue, after having a successful pipeline run for MRs, gets merged to the target branch.\n\nEvery time a merge request is merged into the target branch, the pipelines for the newly added MRs in the train would run against the target branch and the newly added changes from the recently merged MR and changes that are from MRs already in the train.\n\n![Pipeline for merge results](https://about.gitlab.com/images/blogimages/merge-train-explained-working.gif)\n\nMerge trains carry an immense possibility for innovation with GitLab as a toolchain. 
But to be able to build upon the concept, it is imperative to have a holistic understanding of the same at the system level.\n\nHopefully, this post does the job of breaking down the concept into layman's terms, thereby opening doors for future collaboration within [stage groups](/handbook/product/categories/) at GitLab.\n\nHave suggestions around improving merge trains? please leave your thoughts on this [epic](https://gitlab.com/groups/gitlab-org/-/epics/5122).\n",[976,977,916,9,728],{"slug":6401,"featured":6,"template":686},"merge-trains-explained","content:en-us:blog:merge-trains-explained.yml","Merge Trains Explained","en-us/blog/merge-trains-explained.yml","en-us/blog/merge-trains-explained",{"_path":6407,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6408,"content":6413,"config":6418,"_id":6420,"_type":14,"title":6421,"_source":16,"_file":6422,"_stem":6423,"_extension":19},"/en-us/blog/migrating-from-jenkins",{"title":6409,"description":6410,"ogTitle":6409,"ogDescription":6410,"noIndex":6,"ogImage":5186,"ogUrl":6411,"ogSiteName":670,"ogType":671,"canonicalUrls":6411,"schema":6412},"Migrating from Jenkins","Best practices for making the switch to GitLab CI/CD.","https://about.gitlab.com/blog/migrating-from-jenkins","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migrating from Jenkins\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-11-26\",\n      }",{"title":6409,"description":6410,"authors":6414,"heroImage":5186,"date":6415,"body":6416,"category":679,"tags":6417},[788],"2019-11-26","\nMigrations feel daunting, which is one of the reasons teams put them off as long as possible. Even when tools are brittle or not working as they should, it’s the fear of the unknown that keeps us from making the plunge. Teams might have found workarounds to solve common problems but those only work... 
until they don’t work. If you know that you need to make a tool change or migration, it’s much better to do it early rather than during a crisis.\n\nMigrations don’t have to be scary. If you’re tired of brittle builds and endless plugin maintenance, migrating your CI/CD doesn’t have to be a headache. Several teams have [made the switch from Jenkins CI to GitLab CI/CD](/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd/), and there are resources available to ease the transition.\n\n## From Jenkins to GitLab using Docker\n\nThe team at [Linagora](/blog/docker-my-precious/) loved that GitLab includes Git repository management, issue tracking, [code review](/stages-devops-lifecycle/create/), an IDE, activity streams, wikis, and built-in CI/CD to test, build, and deploy code. In order to take advantage of these all-in-one features, they needed to find a way to switch over from Jenkins CI. Luckily, GitLab’s Docker support and [documentation](https://docs.gitlab.com/ee/ci/docker/using_docker_images.html) allowed them to utilize custom Docker images, spin up services as part of testing, build new Docker images, and run on Kubernetes.\n\n### Running Jenkinsfiles in GitLab CI/CD\n\nOne short-term solution teams can use when migrating from Jenkins to GitLab CI/CD is [using Docker to run a Jenkinsfile in GitLab CI/CD](https://lackastack.gitlab.io/website/posts/gitlabci-jenkinsfile/) while the syntax is being updated. While this doesn’t address the endless [plugin dependencies](/blog/plugin-instability/), it’s a stop-gap measure that can get your team working in GitLab until the migration is complete.\n\n## Using Auto DevOps\n\n[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/index.html) can potentially be used to build, test, and deploy your applications with little to no configuration needed at all. 
One of the more time-consuming tasks during a Jenkins migration can be converting the pipelines from Groovy to YAML, but Auto DevOps provides predefined CI/CD configurations – just push your code and Auto DevOps can build a default pipeline. Auto DevOps offers more features including security testing, performance testing, and code quality testing. If you need [advanced customizations](https://docs.gitlab.com/ee/topics/autodevops/index.html#customizing), you can modify the templates without having to start over on a completely different platform.\n\nGitLab senior solutions manager [Brendan O’Leary](/company/team/#brendan) provided a brief overview of how to convert a Jenkins pipeline built with Maven into a GitLab CI/CD pipeline using Auto DevOps.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/RlEVGOpYF5Y\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Advice from teams that made the switch\n\nAt our [GitLab Commit](/events/commit/) event in London, the team at adSoul, a Germany-based marketing automation company, discussed [their own transition from Jenkins to GitLab](/blog/adsoul-devops-transition-to-gitlab-ci/). They offered insight into their migration process, but for others considering GitLab CI/CD, here are some best practices:\n\n### Start small\n\nIn the spirit of iteration, it’s better to make incremental changes than try to tackle everything all at once. Even if it’s just small projects, or just running a Jenkinsfile in the meantime, be patient and aim for steady progress\n\n### Utilize tools effectively\n\nWith Docker and Auto DevOps, you have the tools available to ease the transition so you’re not reinventing the wheel.\n\n### Communicate clearly\n\nKeep teams informed of the process and communicate any changes. This can also apply to the naming of your new pipelines. 
Aim for clear job names, style your config for a better overview, and write comments for variables and hard-to-understand code.\n\nFor more information, check out our [migrating from Jenkins documentation](https://docs.gitlab.com/ee/ci/migration/jenkins.html).\n\nCover image by [Aryan Singh](https://unsplash.com/@wuzclicks?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/@wuzclicks?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[109,9],{"slug":6419,"featured":6,"template":686},"migrating-from-jenkins","content:en-us:blog:migrating-from-jenkins.yml","Migrating From Jenkins","en-us/blog/migrating-from-jenkins.yml","en-us/blog/migrating-from-jenkins",{"_path":6425,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6426,"content":6431,"config":6436,"_id":6438,"_type":14,"title":6439,"_source":16,"_file":6440,"_stem":6441,"_extension":19},"/en-us/blog/migrating-repositories-to-gitlab-just-became-easier",{"title":6427,"description":6428,"ogTitle":6427,"ogDescription":6428,"noIndex":6,"ogImage":2055,"ogUrl":6429,"ogSiteName":670,"ogType":671,"canonicalUrls":6429,"schema":6430},"Migrating repositories to GitLab just became easier","Automate data and user migration into GitLab using open core software Congregate.","https://about.gitlab.com/blog/migrating-repositories-to-gitlab-just-became-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migrating repositories to GitLab just became easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bryan May\"}],\n        \"datePublished\": \"2021-10-26\",\n      }",{"title":6427,"description":6428,"authors":6432,"heroImage":2055,"date":5172,"body":6434,"category":769,"tags":6435},[6433],"Bryan May","\n\nAs customers begin their journey with GitLab, they often start by moving their source code repositories to GitLab. 
The GitLab Professional Services team has been helping customers with large scale [migrations](/services/migration/) for years and during this time have built a utility to automate the migration process - [Congregate](https://gitlab.com/gitlab-org/professional-services-automation/tools/migration/congregate#congregate). To ensure we’re aligned with the [GitLab values](https://handbook.gitlab.com/handbook/values/) of transparency and collaboration, we’re making it available to customers and partners. As of today, Congregate has been moved to a [source available](https://en.wikipedia.org/wiki/Source-available_software) disposition. \n\nFor smaller customers this might not be too important because they can use [GitLab import functionality](https://docs.gitlab.com/ee/user/project/settings/import_export.html) to migrate themselves. But for customers moving hundreds or thousands of source code repositories and associated users to GitLab, this is a game changer. And perhaps most importantly, our growing team of [channel services partners](https://partners.gitlab.com/) can now leverage Congregate functionality as they help customers move data. And for each contribution that partners or customers make back to Congregate, the larger [community](/community/) benefits. \n\n## Why are you doing this? Customers pay you for these services!\n\nGitLab Professional Services falls under the umbrella of Customer Success and its [mission](https://about.gitlab.com/handbook/customer-success/#mission-statement) is to _deliver value to all customers by engaging in a consistent, repeatable, scalable way across defined segments so that customers see the value in their investment with GitLab_. While Professional Services needs to maintain a balanced business (we are not a cost center), we believe that our paramount goal is to help our customers. As GitLab grows and the number of customers also increases, we will rely more heavily on our channel partners. 
We see making Congregate source available as a means to reach the largest quantity of customers with the highest quality migration service offering. Similar to how GitLab has [over 3,000 contributions from the wider community](/blog/3000-contributors-post/), we think welcoming contributions for this migration tool will help ensure GitLab and its partners converge on a single solution rather than diverging to many. \n\n## What do you mean by _Source Available_?\n\nCongregate will be licensed under the GitLab EE license. For services partners and customers, this means:\n\n![Legal Guidelines](https://about.gitlab.com/images/blogimages/2021-10-20-migration-automation/legal-guidelines2.png)\n\n## I’m a partner, will GitLab PS support my migration?\n\n- No, but support is available on a fee-based engagement. As an example if you have a customer migration that you need support on, you can engage GitLab PS as the Prime and GitLab will work with you to subcontract the engagement to you and provide the  necessary support.  \n- If a Partner is using Congregate on its own contract directly with the customer, GitLab PS will not provide support for Congregate. As such, Congregate is a USE AT YOUR OWN RISK tool. \n- Customer engagements on a partner contract intending to migrate to gitlab.com can be subcontracted to GitLab PS to help with these migration activities. \n\n_Note: GitLab PS will always need to be involved for migrations to gitlab.com as certain elevated privileges are required to maintain data integrity._\n\n## It's just a bunch of scripted API calls, what's so special?\n\nCongregate is using all of the published APIs so there isn’t a ton of “secret sauce” in the project. However, we have spent time optimizing for performance using multiprocessing techniques to reduce the time it takes to gather and push data. We’ve also created a standard logging format to provide auditability of what happened during a migration. 
Congregate can migrate data from many popular source systems to help the majority of our prospects and customers move to GitLab.  \n\n## How can I use it?\n\nWe are releasing a learning path for partners (or customers) to earn a [certified GitLab migration engineer badge](https://gitlab.badgr.com/public/badges/zzzdONLxRaCW5cDQSlHsgw). This learning journey will initially be released to GitLab team members and partners. It will include general information about importing data into GitLab, quizzes and exams to validate your knowledge, and a hands-on workshop where you will use Congregate to move data to a test GitLab instance. Once you pass, you will receive a badge that you can post in a **#humblebrag** to your social media network - that's what social media is for, right? We recommend going through this training to understand how to use Congregate. As a partner, you can access this certification learning journey [here](https://partners.gitlab.com/prm/English/c/Training). \n\n![Certified Migration Services Engineer](https://about.gitlab.com/images/blogimages/2021-10-20-migration-automation/migration-badge.png){: .shadow.center}\n\n\n\n\n",[9,231,682],{"slug":6437,"featured":6,"template":686},"migrating-repositories-to-gitlab-just-became-easier","content:en-us:blog:migrating-repositories-to-gitlab-just-became-easier.yml","Migrating Repositories To Gitlab Just Became Easier","en-us/blog/migrating-repositories-to-gitlab-just-became-easier.yml","en-us/blog/migrating-repositories-to-gitlab-just-became-easier",{"_path":6443,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6444,"content":6449,"config":6455,"_id":6457,"_type":14,"title":6458,"_source":16,"_file":6459,"_stem":6460,"_extension":19},"/en-us/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci",{"title":6445,"description":6446,"ogTitle":6445,"ogDescription":6446,"noIndex":6,"ogImage":928,"ogUrl":6447,"ogSiteName":670,"ogType":671,"canonicalUrls":6447,"schema":6448},"Migrating from Bamboo Server to 
GitLab CI: Getting started","Theoretical reasoning and practical proposal on migrating an existing CI/CD infrastructure of some multi-component application from Bamboo Server to GitLab CI","https://about.gitlab.com/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate Atlassian Bamboo Server's CI/CD infrastructure to GitLab CI, part one\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ivan Lychev\"}],\n        \"datePublished\": \"2022-07-06\",\n      }",{"title":6450,"description":6446,"authors":6451,"heroImage":928,"date":6452,"body":6453,"category":791,"tags":6454},"How to migrate Atlassian Bamboo Server's CI/CD infrastructure to GitLab CI, part one",[5192],"2022-07-06","\n\nWhen I faced a task of migrating from `Atlassian Bamboo Server` to `GitLab CI/CD`, I was not able to find any comprehensive information regarding something similar. So I designed a process on my own. 
This demo shows how to migrate a CI/CD structure for an existing multi-component application from a discontinued [Atlassian Bamboo Server](https://www.atlassian.com/migration/assess/journey-to-cloud) to [GitLab CI/CD](https://docs.gitlab.com/ee/index.html) (Community Edition).\n\nThe accompanying repository is https://gitlab.com/iLychevAD/ci-cd-for-a-multi-component-app.\n\nIn this first part of a two-part series, you will find a description of the current state of affairs - i.e., how the CI/CD has been organized within Bamboo Server, how the Bamboo Build and Deploy plans are designed for bootstrapping infrastructure and deploying the components of the application, and the architecture of the application itself.\n\nAnd in part two, we'll take a deeper look at the virtues of `GitLab CI/CD`.\n\n## Initial state\n\n(Note: This is not a description of some particular project but more a kind of compilation of several projects I worked on.)\n\nThe application solution allows the client to fulfill a particular business purpose (the nature of which is not relevant here and thus not specified) and consists of more than 50 discrete components (further referred to as `applications` or just `apps` or `components`). I refrain from calling them microservices as each of them looks more like a full-fledged application communicating with other siblings using REST API and messages in Kafka topics. Some of them expose a web UI to external or internal users and some are just utility parts serving the needs of other components or performing internal operations, etc.\n\nCode for each app is stored in its own Git repository (further just `repo`). So, a `multi-repo` approach is used for them. 
Each app may be written in different languages and packaged as one or several OCI-images for deployment.\n\nEach app repo looks like:\n```\n📦 \u003Csome-app-git-repo>\n ┣ 📂src \u003C-- application source code\n ┣ 📂docker-compose\n ┃ ┗ 📜docker-compose.yml \u003C-- analogue of K8s manifests\n ┗ 📜Dockerfile \u003C-- conventionally, \"Dockerfile\" name is used for OCI image specification file\n```\n\nFor running the applications, the client uses an outdated orchestration system (one from pre-Kubernetes epoch). So each app repo contains a Docker-compose compatible file describing deployment directives for that outdated orchestration system (in essence, similar to Kubernetes Deployment manifests). \n\nFor all of the build and deploy activities Atlassian Bamboo Server is used. \n\nSome details for those not familiar with the Bamboo Server - in an opinionated manner it explicitly separates so-called `build` pipelines and `deployment` pipelines. The former are supposed to build application code and produce some artifacts for further deployment (in our case those artifacts are OCI images uploaded to OCI registry and docker-compose.yml files referring to those images). The latter ones are supposed to take some particular set of artifacts and apply them to some particular `environment`. An `environment` (referred to `env` in the future for brevity) here is just an abstract deployment target characterized by a set of environment variables attached to it and exposed to the apps deployed into it. In reality, an `env` is implemented as a set of resources (virtual machines, databases, object storage locations, etc.) required by the applications.\n\nIn Bamboo, one `build` pipeline usually corresponds to one `deployment` pipeline so when the latter is started it just takes the artifacts from the attached `build` pipeline as input. 
\n\nThe client uses a `production` env, `preproduction` env, and numerous (up to several hundreds) so-called `staging` (short-lived) envs where different development teams and software engineers can test various combinations of the apps (here we assume that they have ~80-100 distinguish components of the application solution and several hundreds of software developers which gives a lot of possible combinations and requires so many `staging` envs).\n\nRoughly, a configuration of a `deploy` pipeline consists of a specification of the source artifacts (which are provided by the attached `build` pipeline as described earlier) and a specification of the set of envs where those artifacts (effectively, an application) can be deployed to.\n\nCurrent installation uses sophisticated dynamic generation of envs set for each app deployment pipeline. Roughly speaking, they have a central configuration file with the list of all existing envs where for each env a list of apps allowed to be deployed to it is denoted. Each time the file is modified (i.e., an env is created or deleted), the deployment pipelines are automatically being updated so as in the result each of them contains a list of envs corresponding for each app. You will have more idea about this aspect when you have looked at the implementation section later.\n\nIn the Bamboo UI this looks like:\n\n![envs_list_on_build_result_page](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/envs_list_on_build_result_page.png)\n\nHere you can see an application build result page where on the right-hand side under the `Included in deployment project` title you can see a list of envs into which you can deploy the application. (Keep in mind that besides `build` and `deployment` pipelines, the Bamboo also uses a notion of `releases` - this is just some kind of an intermediate entity that should be created out of a build result to make it possible to deploy that build into some env). 
The `cloud-with-upwards-arrow` button in the `Actions` column starts a corresponding `deploy` pipeline with automatically passing the link to a build result (in a form of a `release` entity in Bamboo terminology) and the name of the env next to which the button has been clicked (the procedure of how a list of envs is created for a `deploy` pipe is described above).\n\nA concept of a `release` is specific to Bamboo Server, though it provides some amenities. For example, on the Release details page you can see a list of envs where a release has been deployed to. On the `Commits` tab you can backtrack a release to the application code in a SVC. And the `Issues` tab shows attached Jira tickets.\n\n![bamboo_release_details](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/bamboo_release_details.png)\nRelease details page\n{: .note.text-center}\n\nAn env details page also enumerates releases history for this env (in scope of one particular application though as an env is specified for each deployment pipeline individually):\n\n![bamboo_env_details](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/bamboo_env_details.png)\nEnv details page \n{: .note.text-center}\n\nAnd upon clicking the `cloud-with-upwards-arrow` button the Bamboo shows diff of Jira tickets and commits in respect to the previous `release` (only if both releases are made from artifacts from the same Git branch):\n\n![deploy_launch_page](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/deploy_launch_page.png)\nDeploy launch page\n{: .note.text-center}\n\nSo, in general, the current path from source control to an env for each app looks like:\n\n![svc_to_env_path](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/svc_to_env_path.png)\n\nThe Build plans are triggered automatically upon Git commits or Git tags. 
Most of the Deployment plans are started by the project members manually when needed. Each Deploy plan contains a step that checks if a user who started the plan has permissions to deploy into an env (for example, only members of the team which owns an env are allowed to deploy to that env and the deployment to the production env is allowed only for a set of eligible project members).\n\n## The task\n\nThe task is to migrate the aforementioned design from Bamboo Server to `GitLab` while keeping a similar deployment scheme (leveraging GitLab's `Environments` feature).\n\nAlso the following should be considered:\n\n - team members (software engineers, quality assurance specialists) are supposed to be able to manage environments on their own in a user-friendly self-service manner.\n - there should not be any discrepancy in IaC for different environments (per `12-factor apps` best practices), i.e. for any kind of an environment, be it a development or production one, the same set of IaC (here - Terraform files) should be used.\n  - the core ideas and workflows established in the previous situation (implemented with Atlassian Bamboo) should be kept to make the migration smoother for the members of the projects (also sometimes referred to as just users). \n\n## Implementation\n\n### Implementation's GitLab groups\\projects structure\n\n```\n📦 \u003CGitLab root group>\n ┣ 📂 apps GitLab group\n ┃ ┣ 📃 app1 GitLab project\n ┃ ┣  ...\n ┃ ┗ 📃 appN GitLab project\n ┣ 📂 ci GitLab group\n ┃ ┣ 📃 library GitLab project\n ┃ ┗ 📃 oci-registry GitLab project\n ┗ 📂 infra GitLab group\n  ┣ 📃 environment-blueprints GitLab project\n  ┣ 📃 environment-set GitLab project\n  ┗ 📃 k8s-gitops GitLab project\n```\n\n*Description*:\n\nThe most important content is in the `ci/library` repo (the shared ci configs) and `environment-set` repo. 
The other repos don't require much attention: The `k8s-gitops` purpose is not implemented and the repo is empty, the `apps` group just imitates source code for some apps, and the `ci/oci-registry` serves a role of an OCI registry for the solution.\n\nThe `apps` GitLab group merely contains the apps source code per se. Each GitLab project in this group corresponds to one app. Each app repo is expected to contain the source code itself (in the `src` directory for example), a `k8s` directory with k8s manifests, and an OCI image specification file (traditionally often called `Dockerfile`). \n\nThe `ci` GitLab group contains the `ci/library` project that holds shared `.gitlab-ci.yaml` files used by other projects (in a manner similar to Jenkins' shared libraries) and the `ci/oci-registry` serves as an OCI-image registry for various images used by the demo project (it also contains a Git repository with gitlab-ci files to build some utility images with tools used in various pipelines). For simplicity, the latter stores all the images throughout all the projects of the demo, though it's clearly not the best choice for a real-life situation when different sets of images of a set of separate projects/registries should be created.\n\nThe `infra` group holds applications infrastructure creation related Git repositories:\n\nThe `infra/k8s-gitops` is mostly irrelevant to the topic of this demo. In this demo it's presumed that Kubernetes is used as a computation workload platform and when a k8s cluster is created for an environment all the k8s manifests are supposed to be put into this repo (where each branch corresponds to a single environment) to be consumed by a GitOps tool installed into the cluster.\n\nThe `infra/environment-blueprints` holds parametrized IaC templates describing all the resources required for a full-fleged environment. In this example, the Terraform is used as an IaC tool though the principles are similar for its analogs (CloudFormation, for instance). 
The blueprints are parametrized in such manner that in the defaults values they hold some sensible values (most likely set to different values depending on the kind of a environment they were used to bootstrap - for example, a production env and everything else). It's implied that there might coexist several versions of the blueprints (implemented by using Git branches or Git tags) so each environment (see the next paragraph about `infra/environment-set`) can explicitly specify which version it wants to use (in case of using Terraform by specifying Git reference in the module's `source` field).\n\nHere I would like once again to highlight a digression from the best practices. For simplicity in the `infra/environment-blueprints` repo all the parts of an environment are combined into one single Terraform module (or a workspace, or a Stack in CloudFormation's terminology). In that way all the resources are always updated or changed within a single `terraform apply` command, which is cumbersome for large infrastructures containing a lot of resources. For larger infrastructures it would be more manageable to split into disparate Terraform modules (or CloudFormation Stacks, or Azure ARM Resource Groups) and thus make it possible for the infrastructure to be changed/updated in parts according to which exact components of it have changed. This might raise another question - how to manage dependencies in between such parts if they are present? For that, we would use some kind of an external (in respect to the IaC tool itself) orchestration tool like AWS Step Functions... or even GitLab's DAG feature!\n\nFinally, the `infra/environment-set` project represents an actual expected state of resources for each environment (a branch corresponds to an environment). See the README.md file in the Git repo for details. 
In short, each branch here is meant to contain a `main.tf` file referring to some version of the blueprints in the `infra/environment-blueprints` project, a set of Terraform files with overrides for any default variables set in the blueprints modules and other utility files like with a list of users allowed to deploy to the environment (such a list is to be checked by the deployments job in the apps projects).\n\n### **Important!**\n\nWhile looking at the implementation keep im mind that this solution deliberately omits some crucial aspects of any project infrastructure like security or monitoring, just for the sake of keeping this solution manageable and comprehensible. Implementing security and monitoring aspects would make the solution cumbersome and much longer to prepare. That is also true for the `k8s-gitops` repository - it's implied that in a real-life solution this would actively participate in the deployment process and hold Kubernetes clusters state in a GitOps approach but currently, this repo is just a placeholder. In the practical guide later you will see a description of the process of controlling environments using different branches in the `infra/environment-set` project. Ideally, such a workflow should use Merge Requests though for simplicity this implementation skips using MRs.\n\nAnother important thing that's possible not clear in this solution is configuration management, i.e. how configuration settings unique to each environment are provided to the applications inside an environment. Well, given that our applications run within Kubernetes cluster and that the cluster state is placed into a dedicated repo (`k8s-gitops` in our case), the configuration settings situation is simple - for each app the Terraform files in the `infra/environment-blueprints` should output all the sensible configuration values for the resources (like S3 bucket names, RDS endpoint URLs, etc.). 
Then, using Terraform itself or some other tool to create/update an environment, an additional step would collect all those outputs, transform them into k8s ConfigMap manifests, and put them into the GitOps repo. \n\nFor the secrets, we can go several ways. The most simplistic (though not flexible and not easy for secret rotation) way is to use some kind of encryption at rest like Mozilla's SOPS so that the secrets are being encrypted when they are put into the GitOps repo and decrypted when deployed into K8s. Another (and better ?) way - do not store secrets at rest at all but use either a third-party tool like Hashicorp Vault (with dynamic secrets generation) or cloud native features like [AWS IAM Roles for Service Accounts](https://aws.amazon.com/blogs/containers/diving-into-iam-roles-for-service-accounts/).\n\n## Bootstrap the demo\n\nThe accompanying repository, https://gitlab.com/iLychevAD/ci-cd-for-a-multi-component-app, contains Terraform files that enable you to install a copy of the demo structure into your own GitLab account to see it in action:\n\n`*.tf` files in the root directory and in the `tf_modules` directory describe the structure and configuration of the GitLab projects and groups. In the `repo_content` directory there is a content for the GitLab repositories in the projects. The repositories are filled with those files by the Terraform scripts.\n\nThe demo was tested with GitLab Community Edition `15.0.0-pre revision 4bda1cc84df`. The Terraform scripts do not create any real resources but just imitate them using `null_resource` and `local-exec`.\n\nThe bootstrapping process is conducted inside a container image (see the steps below) so it's platform-agnostic and in terms of tools all you need to spin up the demo is some containerization engine installed on your PC (i.e., Docker, Podman, etc).\n\n**Steps**:\n\n1. 
In the GitLab web UI manually create a root group to bootstrap the demo into (see `root_gitlab_group.tf` for a web-link why it's not possible to automate). Notice its ID - you need to provide it at the next step.\n\n2. Clone this repository.\n    Download an official Hashicorp's Terraform image and enter its interactive shell. All the further commands are supposed to be performed inside that shell:\n    \n    ```\n    docker run --rm -it --name ci-cd-for-a-multi-component-app \\\n      -e TF_VAR_gitlab_token=\u003Cyour GitLab account access token> \\\n      -v \u003Cpath to a location where to store ssh key-pairs on your PC>:/deploy-keys \\\n      -e TF_VAR_deploy_key_readwrite=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n      -e TF_VAR_deploy_key_readonly=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n      -e TF_VAR_root_gitlab_group_id=\u003CGitLab group ID> \\\n      -v \u003Cpath to the directory where you cloned the project into>:/repo -w /repo \\\n      --entrypoint /bin/sh \\\n      public.ecr.aws/hashicorp/terraform:1.1.9\n    ```\n    \n    Explanation:\n    \n    `-e TF_VAR_gitlab_token=\u003Cyour GitLab account access token>` - Terraform's `gitlab` provider needs a GitLab access token with sufficient permissions to spin up the demo. Provide it as a Bash environment variable - `TF_VAR_gitlab_token` (see `provider.tf`). It is also used by the `upload_avatar` module.\n    \n    `-v \u003Cpath to a location where to store ssh key-pairs on your PC>:/deploy-keys` - on the left-hand side here specify some directory on your local PC where you would like to store SSH keys needed for deploying the demo. Thus they are persisted even if you exit the container. 
See bullet point `4` for more details.\n    \n    `-e TF_VAR_deploy_key_readwrite=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key` and\n    \n    `-e TF_VAR_deploy_key_readonly=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key` - set the names for the aforementioned keys\n    \n    `-v \u003Cpath to the directory where you cloned the project into>:/repo -w /repo` - we mount the project content from your local PC into the running container. Note that because of that the Terraform local state file will be stored inside that directory on your PC.\n\n3. Install tools - bash and curl:\n    \n    ```\n    apk add bash curl\n \n    /bin/bash\n    ```\n\n4. Upon bootstrapping the demo, the repositories' content is pushed into (i.e. is restored) from the `repo_content` directory. (When the demo is destroyed the content of the repositories is automatically pulled (i.e. is saved) into the same directory - probably you dont need this but I implemented that for my convinience during creating the demo.) We need to create an SSH key pair and need it be the same throughout both phases. In this step we generate it:\n    \n    ```ssh-keygen -t rsa -N '' -f /deploy-keys/ci-cd-for-a-multi-component-app-deploy-key \u003C\u003C\u003C y```\n    \n    ```chmod 0400 /deploy-keys/ci-cd-for-a-multi-component-app-deploy-key```\n    \n    A trick used in `tf_modules/gitlab_project_with_restore_backup/main.tf` requires that in the host section of the SSH public key the location of the private key is specified (in a form like `filename@~/.ssh/\u003Cfilename>`). Otherwise the `tf_modules/gitlab_project_with_restore_backup` won't work. 
Edit accordingly:\n    \n    ```sed -i -e 's|^\\(ssh-rsa .*\\) \\(.*\\)$|\\1 ci-cd-for-a-multi-component-app-deploy-key@/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key|' /deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub```\n    ```\n\nNow you can proceed with bootstrapping the demo using Terraform:\n\nInitialize Terraform by `terraform init` so it installs all the providers.\n\nDeploy the demo with Terraform by `terraform apply`.\n\n**Notice**: During Terraform execution you may see an error:\n```\nError: POST https://gitlab.com/api/v4/projects/multi-component-app-root-group/ci/library/deploy_keys: 400 {message: {deploy_key.fingerprint_sha256: [has already been taken]}}\n\n```\nI believe this is some glitch in the GitLab API. To fix just run `terraform apply` once again until it shows no errors.\n\nAfter that you should see the following structure in GitLab in the root group:\n\n![gitlab_projects_tree](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/gitlab_projects_tree.png)\n\nAll the projects should be filled with files from the `repo_content` directory.\n\nDo not delete the directory with the cloned project and the files created inside it if later you would want to clean up the things. See the next section for instructions.\n\n## Cleaning up\n\nLaunch a container image the same way you did for bootstrapping the demo (see the previous section). 
It's supposed that you didnt delete any files in `\u003Cpath to a location where to store ssh key-pairs on your PC>` and `\u003Cpath to the direcory where you cloned the project into>`: \n\n```\ndocker run --rm -it --name ci-cd-for-a-multi-component-app \\\n  -e TF_VAR_gitlab_token=\u003Cyour GitLab account access token> \\\n  -v \u003Cpath to a location where to store ssh key-pairs on your PC>:/deploy-keys \\\n  -e TF_VAR_deploy_key_readwrite=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n  -e TF_VAR_deploy_key_readonly=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n  -e TF_VAR_root_gitlab_group_id=\u003CGitLab group ID> \\\n  -v \u003Cpath to the direcory where you cloned the project into>:/repo -w /repo \\\n  --entrypoint /bin/sh \\\n  public.ecr.aws/hashicorp/terraform:1.1.9\n```\n\nInstall curl:\n\n```apk add curl```\n\nDo `terraform destroy`.\n\n**Notice**: You may see some errors regarding deleting the `oci-registry` project with OCI images. In that case just delete the images and remove the project manually or wait while GitLab does that itself later.\n\nNow if you want you can remove the cloned project directory and the `\u003Cpath to a location where to store ssh key-pairs on your PC>` directory.\n\nIf you would like to deploy the demo once again without removing the directory with the cloned repo dont forget to remove files created during the previous demo deployment, namely `terraform.tfstate` files in the root directory and `.git` directories everywhere in the `repo_content` directory.\n\nIn the [second part](/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two/) of this tutorial, we'll look at a real-world example of how this can work.\n\n\n\n\n\n",[109,9,978],{"slug":6456,"featured":6,"template":686},"migration-from-atlassian-bamboo-server-to-gitlab-ci","content:en-us:blog:migration-from-atlassian-bamboo-server-to-gitlab-ci.yml","Migration From Atlassian Bamboo Server To Gitlab 
Ci","en-us/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci.yml","en-us/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci",{"_path":6462,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6463,"content":6468,"config":6473,"_id":6475,"_type":14,"title":6476,"_source":16,"_file":6477,"_stem":6478,"_extension":19},"/en-us/blog/ml-experiment-sql",{"title":6464,"description":6465,"ogTitle":6464,"ogDescription":6465,"noIndex":6,"ogImage":2857,"ogUrl":6466,"ogSiteName":670,"ogType":671,"canonicalUrls":6466,"schema":6467},"ML experiment: Writing SQL is about to get a lot easier","Learn how GitLab is experimenting with ML-powered product features in this third installment of our ongoing AI/ML in DevSecOps series.","https://about.gitlab.com/blog/ml-experiment-sql","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Writing SQL is about to get a lot easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2023-03-30\",\n      }",{"title":6464,"description":6465,"authors":6469,"heroImage":2857,"date":6470,"body":6471,"category":1178,"tags":6472},[2862],"2023-03-30","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\nSQL, the structured query language, has long been the backbone of data analysis and manipulation. But let's face it, not everyone is an SQL wizard. For many, writing even a simple SQL query can be a daunting task, let alone tackling more advanced queries. 
Even experienced data analysts spend lots of time and effort writing and debugging complex queries just to answer simple business intelligence questions.\n\nWith the recent advancements in AI and natural language processing, it's now possible for AI models to generate SQL code from simple English language queries. This means that even people without a deep understanding of SQL can generate complex queries to analyze their data. This technology not only improves accessibility but can also save valuable time and effort for data analysts.\n\n## AI-assisted SQL generation\nAt GitLab, we’re experimenting with AI-assisted SQL generation in our [Product Analytics group](https://docs.gitlab.com/ee/user/product_analytics/). This area is focused on helping users understand and gain insights from usage patterns. GitLab Product Analytics can track events within your project applications, which enables you to explore your data and generate dashboards with interactive graphs and charts. You can use our visual designer or YAML to define them, and we envision it becoming even easier with AI assistance. You can learn more about our Product Analytics plans in our [sneak peek blog post](/blog/introducing-product-analytics-in-gitlab/).\n\nIn a simple experiment, our own [Tim Zallmann](https://gitlab.com/timzallmann), Senior Director of Engineering, prototyped leveraging AI-generated queries from simple natural language parsing. The results quickly showcase how powerful using natural language can be to help generate the SQL to populate the Product Analytics dashboards. \n\n![Animated gif image of SQL generation](https://about.gitlab.com/images/blogimages/sql-query-generation-lg.gif){: .shadow}\n\nAbove, you can see an example of how we're using natural language to generate SQL queries to power dashboard charts and graphs. You can watch the full demo in the video below. 
\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/q3HZy0P0ugw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Iterating on AI/ML features\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. This experiment is just the start of many ways we’re looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI Assisted features. We’ll be sharing more of these demos in this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[9,1180,916,1181],{"slug":6474,"featured":6,"template":686},"ml-experiment-sql","content:en-us:blog:ml-experiment-sql.yml","Ml Experiment Sql","en-us/blog/ml-experiment-sql.yml","en-us/blog/ml-experiment-sql",{"_path":6480,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6481,"content":6487,"config":6492,"_id":6494,"_type":14,"title":6495,"_source":16,"_file":6496,"_stem":6497,"_extension":19},"/en-us/blog/mobile-devops-with-gitlab-part-1",{"title":6482,"description":6483,"ogTitle":6482,"ogDescription":6483,"noIndex":6,"ogImage":6484,"ogUrl":6485,"ogSiteName":670,"ogType":671,"canonicalUrls":6485,"schema":6486},"Mobile DevOps: Code signing with project-level secure files","An introduction to mobile code signing with the new Project-level Secure Files feature.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668629/Blog/Hero%20Images/refargotohp-mzZp_9QpYLc-unsplash.jpg","https://about.gitlab.com/blog/mobile-devops-with-gitlab-part-1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Mobile DevOps with GitLab, Part 1 - Code signing with Project-level Secure Files\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2022-09-20\",\n      }",{"title":6488,"description":6483,"authors":6489,"heroImage":6484,"date":3229,"body":6490,"category":769,"tags":6491},"Mobile DevOps with GitLab, Part 1 - Code signing with Project-level Secure Files",[1260],"\n\nMobile teams face some unique challenges when it comes to establishing DevOps practices. Build tools are different, release and approval cycles with app stores can be slower and introduce more risk, and some applications require specialized runners. 
At GitLab, we are focused on finding solutions to these challenges to make it easier for [everyone to contribute](/company/mission/#everyone-can-contribute)! Starting with mobile code signing.\n\nThis post is the first in a series on mobile DevOps and it shows how GitLab makes code signing easier using a new feature called Project-level Secure Files.\n\n## A brief introduction to mobile code signing\n\nAndroid and iOS projects require special configuration files for secure application code signing to ensure an application on a user's device hasn't been tampered with. These configuration files can be challenging to manage in a [CI environment](/topics/ci-cd/benefits-continuous-integration/). Keystores, signing certificates, and provisioning profiles shouldn't be stored in version control because they contain sensitive information. These files are also binary (not text), so they can't easily be stored as CI variables.\n\nTo make this process easier, [we've introduced a feature in GitLab 15.0 called Project-level Secure Files](/releases/2022/05/22/gitlab-15-0-released/#project-level-secure-files-in-open-beta). This feature allows these files to be stored securely as part of a GitLab project but outside version control. Secure Files can then easily be loaded into a CI job when it's time to execute the code signing process.\n\nGet started by adding a secure file to a project:\n\n1. On the top bar, select **Menu > Projects** and find your project.\n2. On the left sidebar, select **Settings > CI/CD**.\n3. In the **Secure Files** section, select **Expand**.\n4. Select **Upload File**.\n5. Find the file to upload, select **Open**, and the file upload begins immediately. 
The file shows up in the list when the upload is complete.\n\n![Upload Secure File](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-1-introducing-project-level-secure-files/upload-secure-file.png)\n\n![List Secure Files](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-1-introducing-project-level-secure-files/list-secure-files.png)\n\nWith the files securely stored with the project, the next step is to load them into a [CI/CD](/topics/ci-cd/) job. To use your secure files in a CI/CD job, you must use the [download-secure-files](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files) tool to download the files in the job. After downloading them, these files can be used in any CI job.\n\nAdd a command in the script section of your job to download the download-secure-files tool and execute it. It's also important to specify the download location for the secure files by setting the desired path in the `SECURE_FILES_DOWNLOAD_PATH` [CI/CD variable](https://docs.gitlab.com/ee/ci/variables/index.html).\n\nFor example:\n\n```\ntest:\n  variables:\n    SECURE_FILES_DOWNLOAD_PATH: './where/files/should/go/'\n  script:\n    - curl --silent \"https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files/-/raw/main/installer\" | bash\n```\n\nNow, when the CI job runs, all of the secure files will be available in the location specified. They can then be passed into a build script or loaded into the Apple keychain. \n\nThat's it! 
Give it a try, and let us know what you think in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/362407).\n\nNext time we will walk through [how to set up code signing for an Android app](/blog/mobile-devops-with-gitlab-part-2/).\n\nCover image by \u003Ca href=\"https://unsplash.com/@refargotohp?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">refargotohp\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/mobile-app-building?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>.\n",[855,9,109],{"slug":6493,"featured":6,"template":686},"mobile-devops-with-gitlab-part-1","content:en-us:blog:mobile-devops-with-gitlab-part-1.yml","Mobile Devops With Gitlab Part 1","en-us/blog/mobile-devops-with-gitlab-part-1.yml","en-us/blog/mobile-devops-with-gitlab-part-1",{"_path":6499,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6500,"content":6506,"config":6511,"_id":6513,"_type":14,"title":6514,"_source":16,"_file":6515,"_stem":6516,"_extension":19},"/en-us/blog/mobile-devops-with-gitlab-part-2",{"title":6501,"description":6502,"ogTitle":6501,"ogDescription":6502,"noIndex":6,"ogImage":6503,"ogUrl":6504,"ogSiteName":670,"ogType":671,"canonicalUrls":6504,"schema":6505},"Mobile DevOps with GitLab, Part 2 - Code signing for Android with GitLab","This second part of our tutorial series shows how to use Project-level Secure Files to sign an Android application.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668592/Blog/Hero%20Images/teddy-gr--adWwTRAm1g-unsplash.jpg","https://about.gitlab.com/blog/mobile-devops-with-gitlab-part-2","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Mobile DevOps with GitLab, Part 2 - Code signing for Android with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2022-09-28\",\n      
}",{"title":6501,"description":6502,"authors":6507,"heroImage":6503,"date":6508,"body":6509,"category":769,"tags":6510},[1260],"2022-09-28","\n\nIn Part 1 of this tutorial series, we talked about a new feature in GitLab called [Project-level Secure Files](/blog/mobile-devops-with-gitlab-part-1/). With Project-level Secure Files, you can securely store your build keys as part of your project in GitLab, and avoid [some](https://www.reddit.com/r/androiddev/comments/a4ydhj/how_to_update_app_when_lost_keystore_file/) [painful](https://www.reddit.com/r/gamemaker/comments/v98den/lost_keystore_for_publishing_to_google_play_store/) [problems](https://www.reddit.com/r/androiddev/comments/95oa55/is_there_anyway_to_update_my_app_after_having/) caused by lost keystore files.\n\nIn this blog post, I'll show you how to create a Keystore file and use it to sign an Android application. Then I'll show you how to quickly create a CI pipeline in GitLab using Project-level Secure Files.\n\n## Generate a private signing key\n\nThe first thing you'll need is a Keystore file. This file is used to securely sign the application. You can generate a Keystore file from your machine by running the following command:\n\n```\nkeytool -genkey -v -keystore release-keystore.jks -alias release -keyalg RSA -keysize 2048 -validity 10000\n```\n\nDuring this process, you'll be asked to create a new password for the Keystore file and provide some information about you and your organization. See the example below:\n\n![Generate Android Keystore](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-2-code-signing-for-android-with-gitlab/generate-keystore.png)\n\n\n## Configure your application\n\nThe next step is to set some environment variables and update build.gradle to add the new signing configuration. 
First, set the following environment variables in either a .env file or in the shell via export.\n\n* `ANDROID_KEY_ALIAS` is the alias you gave for the key in the keytool command above. In this example the value is release.\n* `ANDROID_KEYSTORE_PASSWORD` is the new password you supplied to the keytool command above.\n* `ANDROID_KEY_STOREFILE` is the path to the new keystore file you just created. In this example we're using `../release-keystore.jks`.\n\nWith the environment variables set, the next step is to update the build configuration to use the new Keystore in the build process. In the `app/build.gradle` file add the following configuration inside the Android block for the release signing config.\n\n```\nandroid {\n    ...\n    defaultConfig { ... }\n    signingConfigs {\n        release {\n           storeFile file(System.getenv('ANDROID_KEY_STOREFILE'))\n           storePassword System.getenv('ANDROID_KEYSTORE_PASSWORD')\n           keyAlias System.getenv('ANDROID_KEY_ALIAS')\n           keyPassword System.getenv('ANDROID_KEYSTORE_PASSWORD')\n        }\n    }\n    buildTypes {\n        release {\n            ...\n            signingConfig signingConfigs.release\n        }\n    }\n}\n```\n\nSave these changes to the `app/build.gradle file`, and run the build locally to ensure everything works. Use the following command to run the build:\n\n```\n./gradlew assembleRelease\n```\n\nIf everything worked you'll see a message saying **BUILD SUCCESSFUL**.\n\n## Configure project\n\nWith the build running locally, it takes just a couple of steps to get it running in GitLab [CI](/topics/ci-cd/). The first step is to upload your Keystore file in GitLab. \n\n1. On the top bar, select **Menu > Projects** and find your project.\n2. On the left sidebar, select **Settings > CI/CD**.\n3. In the **Secure Files** section, select **Expand**.\n4. Select **Upload File**.\n5. Find the file to upload, select **Open**, and the file upload begins immediately. 
The file shows up in the list when the upload is complete.\n\n![Upload Secure File](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-2-code-signing-for-android-with-gitlab/upload-secure-file.png)\n\n![List Secure Files](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-2-code-signing-for-android-with-gitlab/list-secure-files.png)\n\nThe next step is to set the CI variables in your project. \n\n1. On the top bar, select **Menu > Projects** and find your project.\n2. On the left sidebar, select **Settings > CI/CD**.\n3. In the **Variables** section, select **Expand**.\n4. Create entries for the three environment variables set earlier: `ANDROID_KEY_ALIAS`, `ANDROID_KEY_STOREFILE`, `ANDROID_KEYSTORE_PASSWORD`.\n\n![List Secure Files](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-2-code-signing-for-android-with-gitlab/list-ci-variables.png)\n\n## CI/CD pipelines\n\nOnce the project is configured, the final step is to create the build configuration in the `.gitlab-ci.yml` file. Below is a sample file.\n\n```\nstages:\n  - build\n\nbuild_android:\n  image: fabernovel/android:api-31-v1.6.1\n  stage: build\n  variables:\n    SECURE_FILES_DOWNLOAD_PATH: './'\n  script:\n    - apt update && apt install -y curl\n    - curl --silent \"https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files/-/raw/main/installer\" | bash\n    - ./gradlew assembleRelease\n  artifacts:\n    paths:\n      - app/build/outputs/apk/release\n```\n\nA few interesting bits from this configuration:\n\n1. Image: [https://github.com/faberNovel/docker-android](https://github.com/faberNovel/docker-android) provides a collection of prebuilt Docker images that work great for CI systems. Find the right version for your project in Docker Hub [https://hub.docker.com/r/fabernovel/android/tags](https://hub.docker.com/r/fabernovel/android/tags). \n2. 
Script: Depending on the image, you may need to install curl; the first line of the example script installs curl to be used in the second line to download and execute the [download-secure-files](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files) tool.\n3. Variables: `SECURE_FILES_DOWNLOAD_PATH` tells [download-secure-files](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files) where to download the Keystore file.\n4. Artifacts: Make the build output available to be downloaded from the CI job, or used in subsequent jobs in the pipeline.\n\nCommit the changes to your `.gitlab-ci.yml` file and after you push the changes to GitLab the build will start.\n\nTake a look at [this branch in the sample project](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/android_demo/-/tree/basic_build) for reference.\n\nGive it a try, and let us know what you think in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/362407). Then, check out Part 3, which deals with [code signing for iOS](/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/). 
\n\n\n\n_Cover image by  \u003Ca href=\"https://unsplash.com/@teddygr?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Teddy GR\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/google-phone?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>_\n",[9,855,916,109],{"slug":6512,"featured":6,"template":686},"mobile-devops-with-gitlab-part-2","content:en-us:blog:mobile-devops-with-gitlab-part-2.yml","Mobile Devops With Gitlab Part 2","en-us/blog/mobile-devops-with-gitlab-part-2.yml","en-us/blog/mobile-devops-with-gitlab-part-2",{"_path":6518,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6519,"content":6525,"config":6530,"_id":6532,"_type":14,"title":6533,"_source":16,"_file":6534,"_stem":6535,"_extension":19},"/en-us/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane",{"title":6520,"description":6521,"ogTitle":6520,"ogDescription":6521,"noIndex":6,"ogImage":6522,"ogUrl":6523,"ogSiteName":670,"ogType":671,"canonicalUrls":6523,"schema":6524},"Mobile DevOps: iOS code signing with GitLab CI & Fastlane","Learn how to use Project-level Secure Files with Fastlane Match to sign an iOS app.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668568/Blog/Hero%20Images/vinicius-amnx-amano-IPemgbj9aDY-unsplash.jpg","https://about.gitlab.com/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Mobile DevOps with GitLab, Part 3 - Code signing for iOS with GitLab CI and Fastlane\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2022-10-03\",\n      }",{"title":6526,"description":6521,"authors":6527,"heroImage":6522,"date":1316,"body":6528,"category":769,"tags":6529},"Mobile DevOps with GitLab, Part 3 - Code signing for iOS with GitLab CI and 
Fastlane",[1260],"\n\nThis post is the third in a series of three blog posts showing how GitLab makes code signing easier using a new feature called Project-level Secure Files.\n\n- [Part 1](/blog/mobile-devops-with-gitlab-part-1/) introduces the Project-level Secure Files feature and the basics of getting started.\n- [Part 2](/blog/mobile-devops-with-gitlab-part-2/) shows an example of how to use Project-level Secure Files to sign an Android app.\n- This post shows how to use the integration with Fastlane Match to sign an iOS app.\n\nCode signing for iOS projects is [notoriously](https://twitter.com/davidcrawshaw/status/1159083791232765953) [difficult](https://twitter.com/bc3tech/status/692778139517255680) and can lead to a lot of time spent debugging errors, but a tool called Fastlane makes it much easier. [Fastlane](https://fastlane.tools/) is an open source tool that greatly simplifies the complexity of the code signing process for iOS development.\n\nIn [Fastlane 2.207.2](https://github.com/fastlane/fastlane/pull/20386) we released support for Project-level Secure Files as a storage backend for Fastlane Match, making it even easier for mobile projects to manage their signing certificates and provisioning profiles within GitLab. Now, we will cover a couple of ways to get started using Project-level Secure Files in a Fastlane project.\n\n## Set up Fastlane Match\n\nIf your project doesn't have a Fastlane Matchfile yet, you can generate one by running the following:\n\n```\nbundle exec fastlane match init\n```\n\nThis command will prompt you to choose which storage backend you want to use (select `gitlab_secure_files`) and to input your project path (for example: `gitlab-org/gitlab`). 
It will then generate a Fastlane Matchfile configured to use your project's secure files for Fastlane Match.\n\n![Initialize Fastlane Match](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/match-init.png)\n\n## Generate a Personal Access Token\n\nNext, you'll need a GitLab Personal Access Token to use Fastlane Match from your local machine. To create a Personal Access Token, visit the Access Tokens section in your GitLab profile (for example: [https://gitlab.com/-/profile/personal_access_tokens](https://gitlab.com/-/profile/personal_access_tokens)). Create a new token with the “api” scope. Take note of the token you just created, we'll be using it later.\n\n## Generate and upload \n\nIf you have not created signing certificates or provisioning profiles yet for your project, running Fastlane Match will do all of the work for you. Run the command below with your Personal Access Token:\n\n```\nPRIVATE_TOKEN=YOUR-TOKEN bundle exec fastlane match \n```\n\nYou may be prompted to log in with your Apple developer account. Once authenticated, this command will generate development certificates and profiles in the Apple Developer portal and upload those files to GitLab. You'll be able to view the files in your project's CI/CD settings as soon as the command completes.\n\nYou can also generate other certificate types by specifying the type in the command, for example:\n\n```\nPRIVATE_TOKEN=YOUR-TOKEN bundle exec fastlane match appstore\n```\n\n## Upload-only\n\nIf you have already created signing certificates and provisioning profiles for your project, you can use Fastlane Match Import to load your existing files into Project-level Secure Files. Simply run:\n\n```\nPRIVATE_TOKEN=YOUR-TOKEN bundle exec fastlane match import\n```\n\nYou'll be prompted to input the path to your files. Once those options are provided, your files will be uploaded and visible in your project's CI/CD settings. 
(Note: If you are prompted for the git_url during the import, it is safe to leave it blank and hit enter.)\n\n![Fastlane Match Import](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/match-import.png)\n\n## CI/CD pipelines\n\nWith your signing certificates and provisioning profiles loaded in Project-level Secure Files, it's now easy to use those files in your [CI/CD pipelines](/topics/ci-cd/). No access tokens are needed when running jobs in GitLab, so you can load your files into a CI/CD job by adding the fastlane command to a CI job script. For example:\n\n```\ntest:\n  stage: test\n  script:\n    bundle exec fastlane match –readonly\n```\n\nUsing the –readonly flag on CI is suggested to prevent any unintended changes to signing certificates by Fastlane. The Fastlane Match command will sync the certificates to the machine, but does not build the application. To run match and build, configure a lane in your project's Fastfile to do both steps. For example:\n\n**Fastfile**\n\n```\ndefault_platform(:ios)\n\nplatform :ios do\n  desc \"Build the App\"\n  lane :build do\n    setup_ci\n    match(type: 'appstore', readonly: is_ci)\n    build_app(\n      clean: true,\n      project: \"ios_demo.xcodeproj\", \n      scheme: \"ios_demo\"\n    )\n  end\nend\n```\n\n**Matchfile**\n\n```\ngitlab_project(\"gitlab-org/incubation-engineering/mobile-devops/ios_demo\")\nstorage_mode(\"gitlab_secure_files\")\ntype(\"appstore\")\n```\n\n**.gitlab-ci.yml File**\n\n```\nbuild:\n  stage: build\n  script:\n    - bundle exec fastlane build\n```\n\nWith all of that in place, you'll have a CI pipeline that runs a single build job. That job will use the `:build` lane from fastlane to run `setup_ci`, `match`, and `build_app`. The result from that job will be a build of your app, signed with the certificates stored in your project with Project-level Secure Files. 
You could then extend fastlane to push that build to Test Flight or the App Store.\n\nFastlane does a good job of handling the complexity associated with certificate management, so you don't have to worry about it, but there is a bit of a learning curve to getting used to Fastlane. Take a look at [this branch](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/ios_demo/-/tree/fastlane_build) in the ios_demo project to for a full working example. Please add any feedback you have in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/362407).\n\n## Better Mobile DevOps\n\nWith Project-level Secure Files, you no longer need to rely on hacks or workarounds to automate code signing, and it can be easily added to new or existing [CI/CD pipelines](/topics/ci-cd/).\n\nFor more about how we are working to make better Mobile DevOps at GitLab, check out the [Mobile DevOps Docs](https://docs.gitlab.com/ee/ci/mobile_devops.html), [SaaS runners on macOS](https://docs.gitlab.com/ee/ci/runners/saas/macos_saas_runner.html), and the [Mobile DevOps Playlist](https://www.youtube.com/playlist?list=PL05JrBw4t0KoVEdembEIySgiciCuZj7Zl) on GitLab Unfiltered.\n\nCover image by \u003Ca href=\"https://unsplash.com/@viniciusamano?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Vinicius \"amnx\" Amano\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/complex-to-simple?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n",[855,9,875],{"slug":6531,"featured":6,"template":686},"mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane","content:en-us:blog:mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane.yml","Mobile Devops With Gitlab Part 3 Code Signing For Ios With Gitlab And 
Fastlane","en-us/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane.yml","en-us/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane",{"_path":6537,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6538,"content":6544,"config":6549,"_id":6551,"_type":14,"title":6552,"_source":16,"_file":6553,"_stem":6554,"_extension":19},"/en-us/blog/mobile-static-application-security-testing-for-android",{"title":6539,"description":6540,"ogTitle":6539,"ogDescription":6540,"noIndex":6,"ogImage":6541,"ogUrl":6542,"ogSiteName":670,"ogType":671,"canonicalUrls":6542,"schema":6543},"Android App Security Testing with SAST","Learn how to secure your Android application with Static Application Security Testing.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666816/Blog/Hero%20Images/security-cover.png","https://about.gitlab.com/blog/mobile-static-application-security-testing-for-android","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Android App Security Testing with SAST\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2020-12-16\",\n      }",{"title":6539,"description":6540,"authors":6545,"heroImage":6541,"date":6546,"body":6547,"category":875,"tags":6548},[4451],"2020-12-16","\n\nAt GitLab, everyone can contribute! [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/) included an [integration for Mobile Static\nApplication Security Testing (SAST)](/releases/2020/10/22/gitlab-13-5-released/#sast-support-for-ios-and-android-mobile-apps) from one of our customers. For their contribution, the \n[H-E-B Digital](https://digital.heb.com/) team were [October 2020's MVP](/releases/2020/10/22/gitlab-13-5-released/#mvp).\n\nTheir contribution enables SAST for mobile applications. 
This includes iOS apps written in Objective-C\nand Swift as well as Android apps written in Java and Kotlin. \n\nThis blog post will go over how Mobile SAST works on Android.\n\n## Static Application Security Testing\n\n[Static Application Security Testing](https://docs.gitlab.com/ee/user/application_security/sast/) analyzes source code for known vulnerabilities.\nSAST is used to detect potentially dangerous attributes in a class, or unsafe code that can\nlead to unintended code execution, as well as other issues such as SQL Injection. More information\non SAST can be seen in the [OWASP Documentation](https://owasp.org/www-community/controls/Static_Code_Analysis).\n\nHere is a video which goes over [setting up SAST for Mobile](https://docs.gitlab.com/ee/user/application_security/sast/#experimental-features), as well as a sample application\nyou can use to get started:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/v0GhEHZWtdw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nIn a nutshell, after the scanner has been configured, whenever an MR is created the\nscanner runs on the application source code and looks for patterns to determine if\nthat code is vulnerable. This is covered below.\n\nInitially this analyzer supports source code analysis but we intend to [expand support for binary\nscanning](https://gitlab.com/gitlab-org/gitlab/-/issues/269915) of .ipa and .apk files in the near future.\n\n## Understanding security rules\n\nSAST for mobile applications uses the Mobile Security Framework (MobSF) to scan source code. MobSF\nuses certain rules in order to determine if an application is vulnerable. 
The rules used to scan\nmobile applications can be seen in their [rules file](https://github.com/MobSF/Mobile-Security-Framework-MobSF/tree/master/StaticAnalyzer/views/android/rules).\nThese rules use [regex](https://en.wikipedia.org/wiki/Regular_expression) in order to find vulnerabilities in the static code.\n \nYou can also [contribute your own rules](https://github.com/MobSF/Mobile-Security-Framework-MobSF/blob/master/.github/CONTRIBUTING.md) if you have thoghts on enhancements.\nI made a small change to [enable a regex to work on Kotlin](https://github.com/MobSF/Mobile-Security-Framework-MobSF/pull/1611).\nNot only can everyone contribute at GitLab, we encourage team members to contribute to other open source projects.\n\nNote: You will have to test your changes before they can be approved. In order to do this, you must [install\nyour branch as seen here](https://mobsf.github.io/docs/#/installation).\n\n## Adding your own scanners\n\nGitLab allows for lots of extensibility. Using our [integration guidance](https://docs.gitlab.com/ee/development/integrations/secure.html), you can bring your own scanners into the\nmerge request pipeline and the security dashboards. This was done for MobSF SAST, as well as the [WhiteSource\nDependency Scanner](/blog/whitesource-for-dependency-scanning/).\n\nI hope you enjoyed this blog post. 
Now you can start making your Android applications more secure.\nYou can reach out on Twitter and share your thoughts with us [@GitLab](https://twitter.com/gitlab)!\n",[875,9,916,231,682],{"slug":6550,"featured":6,"template":686},"mobile-static-application-security-testing-for-android","content:en-us:blog:mobile-static-application-security-testing-for-android.yml","Mobile Static Application Security Testing For Android","en-us/blog/mobile-static-application-security-testing-for-android.yml","en-us/blog/mobile-static-application-security-testing-for-android",{"_path":6556,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6557,"content":6562,"config":6567,"_id":6569,"_type":14,"title":6570,"_source":16,"_file":6571,"_stem":6572,"_extension":19},"/en-us/blog/modernize-your-ci-cd",{"title":6558,"description":6559,"ogTitle":6558,"ogDescription":6559,"noIndex":6,"ogImage":1624,"ogUrl":6560,"ogSiteName":670,"ogType":671,"canonicalUrls":6560,"schema":6561},"3 CI/CD challenges to consider","If these DevOps challenges hit close to home, the right CI/CD could be the answer.","https://about.gitlab.com/blog/modernize-your-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 CI/CD challenges to consider\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-06-05\",\n      }",{"title":6558,"description":6559,"authors":6563,"heroImage":1624,"date":6564,"body":6565,"category":679,"tags":6566},[788],"2019-06-05","\n[Continuous integration and delivery](/solutions/continuous-integration/) helps DevOps teams ship higher quality software, faster. But is all [CI/CD](/topics/ci-cd/) created equal? What does successful CI/CD implementation look like and how do you know you’re on the right track?\n\nIn this four-part series, we talk about modernizing your CI/CD: Challenges, impact, outcomes, and solutions. 
Today, we’ll focus on [DevOps](/topics/devops/) challenges and situations where a comprehensive CI/CD approach could be the answer you’ve been looking for.\n\nIf these problems hit a little too close to home, stay tuned for part two where we dive deeper into how these roadblocks impact the rest of the SDLC.\n\n## What challenges do I face?\n\n### 1. Maintenance and integration costs, predominantly human resources costs.\n\nA large percentage of the overall IT budget goes to support teams of engineers needed to integrate and maintain a complex toolchain. An enterprise company with 1,000 developers could need up to 40 engineers just to maintain the DevOps toolchain instead of allocating these resources towards delivering business value.\n\n### 2. Development is slowed/blocked by the operations team.\n\nThe quintessential challenge of the pre-DevOps world is that dev teams are incentivized to increase innovation velocity by shipping new features. Operations teams are incentivized for stability, uptime, and error reduction. The higher the development velocity, the greater the chance for downtime and errors – so these teams are naturally at odds with each other. Dev leaders don’t always have enough enticing evidence or incentive to go to the Ops team to advocate for increased deployment velocity, and vice versa.\n\n### 3. Developers doing ops.\n\nToday, teams and individual developers base the code they produce on the capabilities of their environment rather than the needs of the business.\n\n## What do these look like in practice?\n\n### A big portion of resources and budget goes to undifferentiated integration and maintenance.\n\nTeams are siloed by their tools – each team has their favorite and is optimized to work within these specialized tools only. It is difficult to collaborate and troubleshoot across the stack due to a lack of visibility.\n\n### Code sometimes never gets to production at all.\n\nThere is a delay between code being written and driving value. 
When problems or errors arise and need to be sent back to the developer, it becomes difficult to troubleshoot because the code isn’t fresh in their mind (context switching). They have to stop working on their current project and go back to the previous code to troubleshoot. So much time might have passed that the code is no longer deployable in its current state. In addition to wasting time and money, this is demoralizing for the developer who doesn’t get to see the fruit of their labor.\n\n### Developers worry about environments, not business logic.\n\nEnvironment dependencies and configuration distracts developers from tasks they’re better equipped to handle. They may even be spending time trying to decide what size VM they need to deploy to. In this order “DevOps” means “Developers have to do both dev and ops.” Only a small percentage of developers actually enjoy this arrangement with most asking, “I’m a developer, please stop asking me to do operations.”\n\nIf you’ve already implemented CI/CD but are still experiencing these roadblocks, it might be time to modernize your CI/CD. 
We invite you to compare GitLab CI/CD to other CI tools and see why we were rated #1 in the Forrester CI Wave™.\n\n[Explore GitLab CI/CD](/solutions/continuous-integration/)\n{: .alert .alert-gitlab-purple .text-center}\n\nPhoto by [Jungwoo Hong](https://unsplash.com/photos/cYUMaCqMYvI?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/arrow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,109,683],{"slug":6568,"featured":6,"template":686},"modernize-your-ci-cd","content:en-us:blog:modernize-your-ci-cd.yml","Modernize Your Ci Cd","en-us/blog/modernize-your-ci-cd.yml","en-us/blog/modernize-your-ci-cd",{"_path":6574,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6575,"content":6580,"config":6586,"_id":6588,"_type":14,"title":6589,"_source":16,"_file":6590,"_stem":6591,"_extension":19},"/en-us/blog/new-default-container-image-gitlab-saas-linux-runnners",{"title":6576,"description":6577,"ogTitle":6576,"ogDescription":6577,"noIndex":6,"ogImage":3558,"ogUrl":6578,"ogSiteName":670,"ogType":671,"canonicalUrls":6578,"schema":6579},"Using Ruby 3.1 as default on GitLab SaaS Linux runners","Learn about the new image and how to ensure CI job compatibility.","https://about.gitlab.com/blog/new-default-container-image-gitlab-saas-linux-runnners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Ruby 3.1 as the default container image on GitLab SaaS Runners on Linux\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2022-12-13\",\n      }",{"title":6581,"description":6577,"authors":6582,"heroImage":3558,"date":6583,"body":6584,"category":791,"tags":6585},"How to use Ruby 3.1 as the default container image on GitLab SaaS Runners on Linux",[3563],"2022-12-13","\nOn January 12, 2023, we will change the [default 
container](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html) image used on GitLab SaaS Runners on Linux from Ruby 2.5, which is end of life, to Ruby 3.1.\n\nIf you have specified a container image in your CI/CD job, then there is no impact to you. In other words, your GitLab SaaS CI/CD job will only run in the default container if no image is set for the job in the `.gitlab-ci.yml` pipeline file.\n\nTo check, open the log view of a CI job and note the image used. For example, if you have not added an image to your CI job on GitLab SaaS, then the job log will have the following:\n\n```\nUsing Docker executor with image ruby:2.5 ...\n\n```\n\nIf you have not set a container image in your CI job, then after this change, the job will run in a Ruby 3.1 container.\n\n## How can I check for any build issues on Ruby 3.1?\n\nWhile it is not expected that running a CI/CD job on Ruby 2.5 is incompatible with Ruby 3.1, to check, simply configure the job to run in a Ruby 3.1 container. To do so, edit the `.gitlab-ci.yml` and add the following:\n\n```\ndefault:\n  image: ruby:3.1\n```\n\n## Future plans\n\nIn addition to this change, we plan to [define](https://gitlab.com/gitlab-org/gitlab/-/issues/384992) a new container image maintenance process for GitLab SaaS Runners on Linux. The new policy aims to ensure that the default image used is updated so that it contains the latest security fixes.\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\n",[9,728,976,977,231],{"slug":6587,"featured":6,"template":686},"new-default-container-image-gitlab-saas-linux-runnners","content:en-us:blog:new-default-container-image-gitlab-saas-linux-runnners.yml","New Default Container Image Gitlab Saas Linux Runnners","en-us/blog/new-default-container-image-gitlab-saas-linux-runnners.yml","en-us/blog/new-default-container-image-gitlab-saas-linux-runnners",{"_path":6593,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6594,"content":6600,"config":6605,"_id":6607,"_type":14,"title":6608,"_source":16,"_file":6609,"_stem":6610,"_extension":19},"/en-us/blog/new-features-to-core",{"title":6595,"description":6596,"ogTitle":6595,"ogDescription":6596,"noIndex":6,"ogImage":6597,"ogUrl":6598,"ogSiteName":670,"ogType":671,"canonicalUrls":6598,"schema":6599},"18 GitLab features are moving to open source","We're open sourcing rich functionality across Plan, Create, Verify, Package, Release, Configure, and Protect.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667021/Blog/Hero%20Images/newnature_cropped.png","https://about.gitlab.com/blog/new-features-to-core","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"18 GitLab features are moving to open source\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2020-03-30\",\n      }",{"title":6595,"description":6596,"authors":6601,"heroImage":6597,"date":6602,"body":6603,"category":726,"tags":6604},[1609],"2020-03-30","I spent some time reviewing GitLab features and determined that, by our [Buyer-Based Open Core](/company/pricing/#the-likely-type-of-buyer-determines-what-features-go-in-what-tier) model, eighteen features that appear in seven different stages of the [DevOps lifecycle](/topics/devops/) ought to 
be open source.\n\nWhen we rolled out our Buyer-Based Open Core model in 2018, what we laid out is that features are assigned to each of our four individual tiers based on who the buyer of the feature is.\nFeatures that serve an individual contributor land in Core/Free.\nFeatures for managers land in Starter/Bronze, for directors in Premium/Silver, and executives in Ultimate/Gold.\nAs we explain the reasoning on [our pricing page](/company/pricing/#the-likely-type-of-buyer-determines-what-features-go-in-what-tier),\n> The feature is put in the plan based on what champion is most likely to **care** about it.\n> Buyers make sense, since a higher-cost plan needs a higher-placed buyer.\n\nThis pricing model has served us well, and we've been committed to it.\nBut, somewhere along the way, we failed to do an audit of many existing features.\nThat's what I did last month, and now I'm excited to share that after personally reviewing all features in each of our tiers **we are open sourcing an unprecedented number of GitLab features**.\n\nThis marks a major milestone in our efforts to empower the community to collaborate more robustly and to take our single tool for the DevOps lifecycle to the next level.\nFrom design management to package managers, managing multiple Kubernetes clusters to connecting related issues, we're making it easier than ever for an individual contributor to have everything they need to plan, build, deploy, and secure their application with GitLab.\n\n## It's not enough to talk the talk – we need to walk the walk.\n\nIf we're saying that [our features are based on the buyer](/company/pricing/#four-tiers), then we need to make sure that the right features are in the right place.\nWe've always been committed to our [stewardship](/company/stewardship/#how-open-source-benefits-from-open-core) of GitLab as an open source project.\nBy auditing the tier of features, we can better serve our open source community while more accurately aligning our business 
model.\nOur commitment to the open source community is why we will always work to [move features down our tiers](/company/pricing/#well-always-move-features-down)\nand [doing so quickly](/company/pricing/#if-a-feature-can-be-moved-down-do-it-quickly) and consistently.\n\n## We hope to unleash the power of everyone’s creativity\n\nOur mission has always been that [everyone can contribute](/company/mission/#mission).\nWith new functionality available to all users, it's easier than ever to contribute - contribute with GitLab, contribute to GitLab the application, or contribute to GitLab the company.\nSee something, submit a Merge Request (MR).\n\nWe recognize that many users in our community have creative ideas on how to make GitLab an even better product.\nBy partnering with the open-source community, we can open-source features even more quickly.\n\n## What's moving?\n\n![devops lifecycle](https://about.gitlab.com/images/blogimages/dev-ops-plan-to-monitor.png){: .medium.center}\n\nFeatures from Plan, Create, Verify, Package, Release, Configure, and Protect are moving.\nThis is *a lot* of features.\nWhile we've outlined all of these features that are ready to be moved to Core/Free, we need your help to move them.\n\nThe work to move the actual code to the open source part of the codebase is defined in issues that are linked from this blog post. 
These issues will go into the backlog for each of the respective product development teams and will be prioritized against new feature development.\nIf having this functionality in Core/Free is important to you, we invite you to contribute yourself to speed up the process.\nWe're not just giving you permission to help us move this code - we're asking you to help us move it.\n\n#### Synchronize collaboration with [Plan](/pricing/feature-comparison/)\n\n[Issues](https://docs.gitlab.com/ee/user/project/issues/) are the primary way people collaborate on ideas and plan work in GitLab.\nBy open sourcing these new features, we're making it easier than ever to plan your projects.\nWe can't wait to see what you come up with.\n* [Related issues](https://docs.gitlab.com/ee/user/project/issues/related_issues.html#related-issues): Connect related issues together.\n* [Export issues](https://docs.gitlab.com/ee/user/project/issues/csv_export.html): Export issues from GitLab as a CSV and receive the CSV as an attachment to your default notification email.\n* [Issue board focus mode](https://docs.gitlab.com/ee/user/project/issue_board.html#focus-mode): Use this tool to plan, organize, and visualize a workflow for a feature or product release. 
It can be used as a Kanban or a Scrum board.\n\n[Service desk](https://docs.gitlab.com/ee/user/project/service_desk.html) allows your team to connect directly with any external party through email right inside of GitLab –\nno external tools required.\nWith that, the complexity and inefficiencies of multiple tools are eliminated, significantly shortening the cycle time from feedback to software updates.\nWe would love to hear how you leverage service desk in your workflows now that it's open source.\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Related issues         | [gitlab-org/gitlab#212329](https://gitlab.com/gitlab-org/gitlab/-/issues/212329) |\n| Export issues          | [gitlab-org/gitlab#212330](https://gitlab.com/gitlab-org/gitlab/-/issues/212330) |\n| Issue board focus mode | [gitlab-org/gitlab#212331](https://gitlab.com/gitlab-org/gitlab/-/issues/212331) |\n| Service desk           | [gitlab-org/gitlab#212332](https://gitlab.com/gitlab-org/gitlab/-/issues/212332) |\n\n#### Build better code and branch powerfully with [Create](/pricing/feature-comparison/)\n\nThe machine you're using shouldn't limit how easy it is to develop.\n\nWe're excited to bring down two features for developing in web-first environments.\n* The [Web Terminal for Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/index.html#interactive-web-terminals-for-the-web-ide) makes it faster and easier to contribute changes to projects.\n* [File syncing to web terminal](https://docs.gitlab.com/ee/user/project/web_ide/#file-syncing-to-web-terminal) in the Web IDE helps you test code changes in a preconfigured terminal environment.\n\n[Design management](https://docs.gitlab.com/ee/user/project/issues/design_management.html#design-management) allows you to upload design assets (wireframes, mockups, etc.) 
to GitLab issues and keep them stored in one single place, accessed by the Design management’s page within an issue, ensuring issues are the single source of truth for everything required to develop a feature.\n\nAll together, these changes to create should make it easier to go from wireframe to MVC in the blink of an eye – independent of what machine you're on – improving project efficiency.\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Web Terminal for Web IDE | [gitlab-org/gitlab#211685](https://gitlab.com/gitlab-org/gitlab/-/issues/211685) |\n| File syncing to the web terminal | [gitlab-org/gitlab#211686](https://gitlab.com/gitlab-org/gitlab/-/issues/211686) |\n| Design Management | [gitlab-org/gitlab#212566](https://gitlab.com/gitlab-org/gitlab/-/issues/212566) |\n\n#### Bring code quality to new heights with [Verify](/pricing/feature-comparison/)\n\n[Code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) reports on MRs will be open source.\nKeeping your project’s code simple, readable, and easy to contribute to is difficult.\nCode quality on MRs makes this easier to do and maintain.\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Code quality | [gitlab-org/gitlab#212499](https://gitlab.com/gitlab-org/gitlab/-/issues/212499) |\n\n#### Build and share packages in [Package](/pricing/feature-comparison/)\n\nWe're delivering a set of package managers so all your packages can stay in one place:\n* [Conan (C/C++) repository](https://docs.gitlab.com/ee/user/packages/conan_repository/)\n* [Maven (Java) repository](https://docs.gitlab.com/ee/user/packages/maven_repository/index.html)\n* [NPM (node) registry](https://docs.gitlab.com/ee/user/packages/npm_registry/index.html)\n* [NuGet (.NET) repository](https://docs.gitlab.com/ee/user/packages/nuget_repository/)\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Package Managers 
| [gitlab-org&2867](https://gitlab.com/groups/gitlab-org/-/epics/2867) |\n\n#### Continuous delivery is simpler with [Release](/pricing/feature-comparison/)\n\nWith **four** incredible [Release](/pricing/feature-comparison/) features moving to Core/Free, you can be so confident in your releases that you deploy on Fridays (YMMV).\n\n* [Canary deployments](https://docs.gitlab.com/ee/user/project/canary_deployments.html) roll out the new version of your application to a small portion of your fleet.\n* [Incremental rollout](https://docs.gitlab.com/ee/topics/autodevops/index.html#incremental-rollout-to-production) allows you to first check how the new version of your application is behaving before increasing the rollout to 100%.\n* [Feature flags](https://docs.gitlab.com/ee/operations/feature_flags.html) allow you to ship a project in different flavors by dynamically toggling certain functionality.\n* [Deploy boards](https://docs.gitlab.com/ee/user/project/deploy_boards.html) offer a consolidated view of the current health and status of each CI environment running on Kubernetes. 
You can see the progress and status of a rollout, pod by pod, within your existing workflows without having to access Kubernetes.\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Canary deployments     | [gitlab-org/gitlab#212319](https://gitlab.com/gitlab-org/gitlab/-/issues/212319) |\n| Incremental rollout    | [gitlab-org/gitlab#212316](https://gitlab.com/gitlab-org/gitlab/-/issues/212316) |\n| Feature flags          | [gitlab-org/gitlab#212318](https://gitlab.com/gitlab-org/gitlab/-/issues/212318) |\n| Deploy boards          | [gitlab-org/gitlab#212320](https://gitlab.com/gitlab-org/gitlab/-/issues/212320) |\n\n#### Support for multiple Kubernetes clusters in [Configure](/pricing/feature-comparison/)\n\nWith support for [multiple Kubernetes clusters](https://docs.gitlab.com/ee/user/project/clusters/#multiple-kubernetes-clusters), you will be able to easily deploy different environments, like Staging and Production, to different Kubernetes clusters.\nThis allows you to enforce strict data separation.\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Support for multiple Kubernetes clusters | [gitlab-org/gitlab#212229](https://gitlab.com/gitlab-org/gitlab/-/issues/212229) |\n\n#### Bolster application security with [Protect](/pricing/feature-comparison/)\n\nProtect your apps and infrastructure from security intrusions.\n[Network policies for container network security](https://docs.gitlab.com/ee/update/removals.html) will be available to all users.\nWith that, you can install network policies into GitLab-managed Kubernetes clusters to limit communication between Kubernetes pods.\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Network policies for container network security | [gitlab-org/gitlab#212571](https://gitlab.com/gitlab-org/gitlab/-/issues/212571) |\n\nWe hope that by open sourcing these features we will make it easier for all users 
to treat GitLab as a single application for the entire DevOps lifecycle. **We are thrilled about the limitless possibilities ahead of us as a community and we're looking forward to collaborating closely with each of you!**\n\nCover image by [Rodrigo Soares](https://unsplash.com/@rodi01?utm_medium=referral&amp;utm_campaign=photographer-credit&amp;utm_content=creditBadge) on [Unsplash](https://unsplash.com/photos/c6SciRp2kaQ?modal=%7B%22tag%22%3A%22CreditBadge%22%2C%22value%22%3A%7B%22userId%22%3A%22Tq225eP_V5M%22%7D%7D)",[9,682],{"slug":6606,"featured":6,"template":686},"new-features-to-core","content:en-us:blog:new-features-to-core.yml","New Features To Core","en-us/blog/new-features-to-core.yml","en-us/blog/new-features-to-core",{"_path":6612,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6613,"content":6618,"config":6623,"_id":6625,"_type":14,"title":6626,"_source":16,"_file":6627,"_stem":6628,"_extension":19},"/en-us/blog/new-to-devops-take-our-devops-for-beginners-quiz",{"title":6614,"description":6615,"ogTitle":6614,"ogDescription":6615,"noIndex":6,"ogImage":4540,"ogUrl":6616,"ogSiteName":670,"ogType":671,"canonicalUrls":6616,"schema":6617},"New to DevOps? Take our DevOps for beginners quiz","We asked nearly 1400 DevOps beginners about their priorities and challenges for 2022. See how you compare, and take our short DevOps for beginners quiz.","https://about.gitlab.com/blog/new-to-devops-take-our-devops-for-beginners-quiz","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New to DevOps? 
Take our DevOps for beginners quiz\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-09-13\",\n      }",{"title":6614,"description":6615,"authors":6619,"heroImage":4540,"date":6620,"body":6621,"category":769,"tags":6622},[745],"2022-09-13","__Update: The quiz mentioned here has been closed.__\n\nOver the last 12 months, we’ve asked three [\"DevOps for beginners\"](https://about.gitlab.com/topics/devops/beginner-devops-platform/) questions of nearly 1400 people:\n\n- What’s the most important skill you hope to learn this year?\n- What continues to be your team’s biggest DevOps challenge?\n- What is your DevOps team’s top priority for 2022?\n\nA resounding majority (nearly 83%) told us they want to learn a new programming language and about 15% hope to get better at automation.\n\n(Learn the basics of Python with our [5-part series](/blog/learn-python-with-pj-part-1/), understand [Rust](/blog/rust-programming-language/), or [get started with CI/CD](/blog/beginner-guide-ci-cd/).)\n\nWhat are they struggling with?\n\nJust over 70% said security was the biggest challenge for their DevOps team this year (a result that tracks with our just released [Global DevSecOps Survey](/developer-survey/)), while just shy of 24% said it was testing (again, that’s [a very common complaint](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/)). 
\n\nThe top priorities for 2022 were split between increasing automation (47%) and moving to a [DevOps platform](/topics/devops-platform/) (23%).\n\nAnd we have more DevOps for beginner resources here:\n\n[Beginner’s Guide to DevOps eBook](https://page.gitlab.com/resources-ebook-beginners-guide-devops.html)\n\nA [step-by-step](/blog/if-its-time-to-learn-devops-heres-where-to-begin/) look at how to get started with DevOps\n\nA [guide to Git for beginners](/blog/beginner-git-guide/)\n\n[Continuous integration](/blog/a-beginners-guide-to-continuous-integration/) for beginners\n",[9,681,1339],{"slug":6624,"featured":6,"template":686},"new-to-devops-take-our-devops-for-beginners-quiz","content:en-us:blog:new-to-devops-take-our-devops-for-beginners-quiz.yml","New To Devops Take Our Devops For Beginners Quiz","en-us/blog/new-to-devops-take-our-devops-for-beginners-quiz.yml","en-us/blog/new-to-devops-take-our-devops-for-beginners-quiz",{"_path":6630,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6631,"content":6637,"config":6641,"_id":6643,"_type":14,"title":6644,"_source":16,"_file":6645,"_stem":6646,"_extension":19},"/en-us/blog/new-year-new-programming-language",{"title":6632,"description":6633,"ogTitle":6632,"ogDescription":6633,"noIndex":6,"ogImage":6634,"ogUrl":6635,"ogSiteName":670,"ogType":671,"canonicalUrls":6635,"schema":6636},"New year, new programming language","Use the calendar turnover as an excuse to spark your curiosity and learn one of the hottest programming languages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668524/Blog/Hero%20Images/closeup-photo-of-black-and-blue-keyboard-1194713.jpg","https://about.gitlab.com/blog/new-year-new-programming-language","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New year, new programming language\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": 
\"2022-01-13\",\n      }",{"title":6632,"description":6633,"authors":6638,"heroImage":6634,"date":3564,"body":6639,"category":769,"tags":6640},[851],"\nIt’s 2022, time to learn something new. So how about studying a new programming language? Keeping your [dev skills sharpened](/blog/the-top-skills-you-need-to-get-your-devops-dream-job/) and gaining fluency in more than one language is ideal for DevOps pros, according to DevOps Institute’s [2021 Upskilling Report](https://info.devopsinstitute.com/2021-upskilling-report-download).\n\nBut with all the new programming languages around, it can be tricky to know where to begin. [Stack Overflow’s 2021 Survey](https://insights.stackoverflow.com/survey/2021) found devs were most interested in learning Python, JavaScript, and Go. JavaScript is a fairly ubiquitous language, so let’s look instead at tutorials and advice for some up-and-coming languages, including Python, Go, Rust, Groovy, and Kotlin.\n\n## The promise of Python\n\nPython is a very popular second or third language for websites, analytics, and all things DevOps. It’s also very easy to [start learning](/blog/beginner-guide-python-programming/). Python.org offers [a free tutorial](https://www.python.org/about/gettingstarted/). There is also an [interactive option](https://www.learnpython.org).\n\n## Go for the gold\n\nAnother language to consider is Go because its proponents say it’s incredibly easy to learn and use. Go is so interesting that GitLab Staff Developer Evangelist [Brendan O’Leary](/company/team/#brendan) is going to learn it this year [and plans to share the journey on his blog](https://boleary.dev/blog/2022-01-10-new-year-new-language.html). 
Learn the [basics of Go](https://go.dev/doc/tutorial/getting-started) and then consider diving into [Go by Example](https://gobyexample.com).\n\nOnce you’re done hitting the books, tackle a real-world challenge, like [using Go for CI](/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss/).\n\n## All about Rust\n\nIt’s safe to say the devs who know and use Rust _love_ Rust. But, to be fair, it’s not necessarily the easiest language to learn. That said, if the goal is secure code, [Rust is a solid choice](/blog/rust-programming-language/). To try Rust on for size, devs can either [read The Rust Programming Language book](https://doc.rust-lang.org/book/) or try [the Rustlings course](https://github.com/rust-lang/rustlings/). Overachievers might want to [learn how to fuzz Rust code](/blog/how-to-fuzz-rust-code/). \n\n## Feeling Groovy\n\nGroovy is all about scripting and, as such, is ideal for those wanting to learn automation. Also, Groovy works side-by-side with Java, meaning it’s going to be a language that comes easily to those devs. [Get started with Groovy](https://www.guru99.com/groovy-tutorial.html). For a deeper dive, here’s [a list of books about Groovy](https://groovy-lang.org/learn.html).\n\n## Create with Kotlin\n\nApparently Kotlin is a programming language that [makes developers happier](https://kotlinlang.org) and is ideal for data science projects and Android apps. If you want to be a happier developer, too, [here’s how to get started with Kotlin](https://www.codecademy.com/learn/learn-kotlin/modules/learn-kotlin-introduction-to-kotlin). 
Google also offers a [bootcamp for Kotlin developers](https://www.udacity.com/course/kotlin-bootcamp-for-programmers--ud9011).\n\n## Bonus round: Use Python and Rust together\n\nBecause there is no point in learning a new programming language unless you can use it, here’s a step-by-step guide to bringing your application idea to production [using Python, Rust, and GitLab CI](/blog/python-rust-and-gitlab-ci/).\n",[9,813],{"slug":6642,"featured":6,"template":686},"new-year-new-programming-language","content:en-us:blog:new-year-new-programming-language.yml","New Year New Programming Language","en-us/blog/new-year-new-programming-language.yml","en-us/blog/new-year-new-programming-language",{"_path":6648,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6649,"content":6653,"config":6657,"_id":6659,"_type":14,"title":6660,"_source":16,"_file":6661,"_stem":6662,"_extension":19},"/en-us/blog/next-generation-container-registry",{"title":6650,"description":3577,"ogTitle":6650,"ogDescription":3577,"noIndex":6,"ogImage":2055,"ogUrl":6651,"ogSiteName":670,"ogType":671,"canonicalUrls":6651,"schema":6652},"Introducing the next generation of the GitLab.com Container Registry","https://about.gitlab.com/blog/next-generation-container-registry","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing the next generation of the GitLab.com Container Registry\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2022-04-12\",\n      }",{"title":6650,"description":3577,"authors":6654,"heroImage":2055,"date":872,"body":6655,"category":726,"tags":6656},[3582],"\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\nIn the coming weeks, we will begin the second phase of the rollout of the new version of the Container Registry on GitLab.com. Prior to deploying this update, we wanted to clearly communicate the planned changes, what to expect, and why we are excited.\n\nIf you have any questions or concerns, please don't hesitate to comment in the [epic](https://gitlab.com/groups/gitlab-org/-/epics/5523).\n\n## Context \n\nIn [Milestone 8.8](/releases/2016/05/22/gitlab-8-8-released/), GitLab launched the MVC of the Container Registry. This feature integrated the Docker Distribution registry into GitLab so that any GitLab user could have a space to publish and share container images.\n\nBut there was an inherent limitation with Docker Distribution, as all metadata associated with a given image/tag was stored in the object storage backend. This made using that metadata to build API features (like storage usage visibility, sorting, and filtering) unfeasible. The most recent Container Registry update added a new PostgreSQL backend, which is used to store the metadata. Additionally, this new version also includes an automatic online garbage collector to remove untagged images and recover storage space.\n\nIn November 2021, we started [phase 1](/blog/gitlab-com-container-registry-update/) of the migration. This completed in January 2022 without any significant issues. Since then, every new image repository pushed to GitLab.com uses the new, metadata database-backed registry. Today, nearly 20% of Container Registry traffic is already routed to the new version.\n\nNow we are ready to begin [Phase 2 of the migration](https://gitlab.com/gitlab-org/container-registry/-/issues/374#phase-2-migrate-existing-repositories). 
This will migrate image repositories created before January 22, 2022, to the new Container Registry. Once complete, we can unblock many of the features that you've been asking for.\n\n## Why we are excited \n\n- [Storage visibility for the Container Registry](https://gitlab.com/groups/gitlab-org/-/epics/7225)\n\n- Performance improvements for list operations when using the GitLab API and UI\n\n- [Redesign of the UI](https://gitlab.com/groups/gitlab-org/-/epics/3211)\n  - [Build and commit metadata for tags built via CI](https://gitlab.com/gitlab-org/gitlab/-/issues/197996)\n  - [Search by tag name](https://gitlab.com/gitlab-org/gitlab/-/issues/255614)\n  \n- [Resolve: Group/project path updates break the Container Registry](https://gitlab.com/gitlab-org/gitlab/-/issues/18383)\n\n## The plan \n\nWe're planning a [phased migration](https://gitlab.com/gitlab-org/container-registry/-/issues/374#phase-2-migrate-existing-repositories), starting with GitLab.org repositories. After that, we'll move on to the Free tier, then on to Premium and Ultimate. We'll roll this out incrementally to maintain safety for customers and provide our team with an opportunity to identify and address any concerns.\n\n## Timing \n\nMigration begins: April 18th, 2022\nMigration ends: July 8th, 2022.\n\nTentative dates by tier:\n\n- GitLab internal projects: April 14 - April 18\n- Free: April 18 - May 18\n- Premium: May 18 to June 18\n- Ultimate: June 18 to July 8\n\nFor more information about the planned, percentage-based rollout, please refer to this [epic](https://gitlab.com/groups/gitlab-org/-/epics/6427).\n\n## What to expect\n\n- For each repository, the migration will only target _tagged_ images. Untagged and unreferenced manifests, and the layers they reference, will be left behind and become inaccessible. 
Untagged images were never visible through the GitLab UI or API, but they were left behind in the backend after becoming dangling.\n\n- Once migrated to the new registry, repositories will be subject to continuous online garbage collection, deleting any untagged and unreferenced manifests and layers that remain as such for longer than 24 hours.\n\n- To ensure data consistency, the migration of each repository requires the enforcement of a small read-only period at the very end. This period is expected to be less than ten seconds for the vast majority of repositories. During this period, an error message will be returned when trying to upload or delete data, prompting clients to try again. Most clients, will automatically retry several times, which should eventually succeed as the read-only enforcement lifts. We also put a mechanism in place to automatically cancel and reschedule migrations that are taking longer than expected. Nevertheless, if you experience any issues, please comment in the [epic](https://gitlab.com/groups/gitlab-org/-/epics/5523).\n\n## FAQ \n\n- Do I need to do anything?\n  - No, the process is fully automated. But if you have any untagged images that you'd like to preserve, please be sure to tag them as soon as possible.\n\n- Is there anything I can do to help? \n  - Yes! Although no action is necessary, we recommend activating the Container Registry [cleanup policies](https://docs.gitlab.com/ee/user/packages/container_registry/#cleanup-policy) for any relevant projects.\n\n- Is the update required? \n  - Yes. With this change, we can deliver a more modern and scalable product. 
You don't want to miss out on those features!\n",[9,728,231],{"slug":6658,"featured":6,"template":686},"next-generation-container-registry","content:en-us:blog:next-generation-container-registry.yml","Next Generation Container Registry","en-us/blog/next-generation-container-registry.yml","en-us/blog/next-generation-container-registry",{"_path":6664,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6665,"content":6670,"config":6675,"_id":6677,"_type":14,"title":6678,"_source":16,"_file":6679,"_stem":6680,"_extension":19},"/en-us/blog/observability-is-key-to-cloud-native-transitions-and-modern-application-development",{"title":6666,"description":6667,"ogTitle":6666,"ogDescription":6667,"noIndex":6,"ogImage":668,"ogUrl":6668,"ogSiteName":670,"ogType":671,"canonicalUrls":6668,"schema":6669},"Observability's role in cloud-native app development","Want better visibility into the entire software development lifecycle across environments? Learn how observability can help.","https://about.gitlab.com/blog/observability-is-key-to-cloud-native-transitions-and-modern-application-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Observability is key to cloud-native transitions and modern application development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-04-05\",\n      }",{"title":6671,"description":6667,"authors":6672,"heroImage":668,"date":4088,"body":6673,"category":679,"tags":6674},"Observability is key to cloud-native transitions and modern application development",[1454],"\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. 
Please do not rely on this information for purchasing or planning purposes._\n\nModern application development requires DevOps teams to be able to collaborate and react to what is happening across the software development lifecycle. Yet, as companies move away from monolithic code bases resident on a server or cluster of virtual machines to cloud-native environments, this goal becomes more difficult to achieve. Cloud-native architectures are more complex with more elements to configure, protect, execute, and measure. To ensure maximum visibility and responsiveness to issues early on in application development and throughout the lifecycle, companies are adopting observability.\n\n## Observability defined\n\nObservability, which [451 Research](https://451research.com/) defines as the collection and analysis of data logs, metrics, and traces, becomes critical and essential with cloud-native technologies and acts as a step beyond monitoring. “The need for such an approach has been brought to the fore by complex, distributed microservices-based applications where the variables are so numerous that it can be impossible to know exactly what metrics need to be collected for the gamut of potential events that could arise,” 451 Research’s “Voice of the Enterprise: DevOps, Organizational Dynamics - Advisory Report” states.\n\n“A need to know what is happening with infrastructure and applications, particularly across hybrid and multi-cloud infrastructure, has driven broad adoption of observability,” according to the report.\n\n## How observability improves cloud-native tech adoption\n\nMore than half of organizations surveyed by 451 Research report either full adoption or some adoption at the team level of cloud-native technologies such as containers, Kubernetes, service mesh, and serverless computing. Another quarter to one-third of respondents plans to deploy cloud-native technologies.\n\nThe challenge is visibility across this new, more complex architecture. 
While cloud-native technologies offer more flexibility and cost efficiencies for computing resources, they can make it difficult to gain end-to-end visibility of software vulnerabilities, application performance, and quality assessments, and to be able to know where and how to affect change early on in the development lifecycle.\n\nDevOps improvements such as security and analytics are driving the adoption of observability, as is the increased need for compliance. With observability, according to 451 Research’s report, “one can query the data they have and ask any number of questions about a system, and, ideally, get an answer without having to predefine the exact data collected or tagging applied to answer the question.”\n\nIn other words, observability can provide a more flexible toolkit and enable a more active drill-down into what’s actually happening in the development lifecycle. With properly implemented observability, DevOps teams can, in real-time, identify a problem, fix it, benchmark the improvement, and measure it going forward – even in a cloud-native environment that is abstracted from knowledge of underlying systems. Having the ability to observe and measure your end-to-end DevOps efforts can reduce risk and provide greater control of cloud-native environments. \n\nDigital transformation leaders and laggards alike understand the need for observability. Nearly two-thirds of all respondents say they have adopted observability (41%) or have it in discovery/proof of concept (23%). 
Nearly a third plan to implement it within 12 to 24 months.\n\n“While it is great to see these adoption rates, the ultimate goal is to evolve observability’s inputs into actionable insights that positively impact the business,” says Sebastien Pahl, principal product manager at GitLab and co-founder of observability start-up OpsTrace (which was [acquired by GitLab in 2021](/press/releases/2021-12-14-gitlab-acquires-opstrace-to-expand-its-devops-platform-with-open-source-observability-solution.html)).\n\n## The benefits of observability\n\nIn modern application development, dev, sec, and ops teams share the responsibility of software development and delivery. In mature organizations, DevOps can extend to include stakeholders from compliance, legal, finance, and other departments with a direct stake in value delivery. Observability provides DevOps teams greater flexibility in how to utilize and share data across an organization.\n\nPahl likens observability to a flight crew being able to see, learn from, and react to all the data from instruments and dashboards on a plane as it is flying. “With observability, everyone can look at the same data through a different lens,” he says.\n\nObservability has significant benefits, including the following:\n\n- Developers can add code early in the development lifecycle for events they want to observe.\n\n- DevOps teams can move faster because they know when something is wrong and exactly what is wrong. 
They can fix problems once and move on.\n\n- Organizations can detect problems before customers do.\n\n- DevOps teams can assign certain alerts to specific individuals or teams so ops teams won’t be burned out responding to general alerts.\n\n- The inputs and metrics written through observability lay the foundation for AI and machine learning.\n\n## Observability and the DevOps Platform\n\nGitLab believes that [observability is foundational](https://opstrace.com/blog/gitlabobsvervabilityui) to a DevOps platform, and will make the capability available to all GitLab users. [Our vision](/direction/monitor/) is to make every GitLab project observable by default, with features that are easy to operate without specialized, expert skills. Teams can connect the dots between every deployment, incident, and other noteworthy events using and collaborating with telemetry data, which ultimately decreases the frequency and severity of production issues.\n\nGitLab’s observability capability is completely open-sourced and relies on open APIs such as Prometheus and OpenTelemetry so users don’t have to worry about vendor lock-in from instrumentation to alerting. 
It’s built into the GitLab DevOps platform to help you use the capability right away within your native workflow.\n\nLearn more about [observability and the DevOps Platform](https://about.gitlab.com/).\n\n\n\n\n\n\n\n",[9,1041,682,916],{"slug":6676,"featured":6,"template":686},"observability-is-key-to-cloud-native-transitions-and-modern-application-development","content:en-us:blog:observability-is-key-to-cloud-native-transitions-and-modern-application-development.yml","Observability Is Key To Cloud Native Transitions And Modern Application Development","en-us/blog/observability-is-key-to-cloud-native-transitions-and-modern-application-development.yml","en-us/blog/observability-is-key-to-cloud-native-transitions-and-modern-application-development",{"_path":6682,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6683,"content":6689,"config":6694,"_id":6696,"_type":14,"title":6697,"_source":16,"_file":6698,"_stem":6699,"_extension":19},"/en-us/blog/observability-vs-monitoring-in-devops",{"title":6684,"description":6685,"ogTitle":6684,"ogDescription":6685,"noIndex":6,"ogImage":6686,"ogUrl":6687,"ogSiteName":670,"ogType":671,"canonicalUrls":6687,"schema":6688},"Observability vs. monitoring in DevOps","Want to gain true and actionable visibility across your software development lifecycle? Observability is the answer.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665484/Blog/Hero%20Images/monitoring-update-feature-image.jpg","https://about.gitlab.com/blog/observability-vs-monitoring-in-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Observability vs. 
monitoring in DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Vanbuskirk\"}],\n        \"datePublished\": \"2022-06-14\",\n      }",{"title":6684,"description":6685,"authors":6690,"heroImage":6686,"date":6691,"body":6692,"category":791,"tags":6693},[5134],"2022-06-14","\nIn almost any modern software infrastructure, there is inevitably some form of monitoring or logging. The launch of syslog for Unix systems in the 1980s established both the value of being able to audit and understand what is going on inside a system, as well as the architectural importance of separating that mechanism.\n\nHowever, despite the value and importance of this visibility into system behavior, too often monitoring and logging are treated as an afterthought. There are countless instances of systems emitting logs into a void, never being aggregated or analyzed for critical information. Or infrastructure where legacy monitoring systems were installed a decade ago and never updated to modern standards.\n\nRecently, shifts in the operational landscape have given rise to the concept of observability. Rather than expect engineers to form their own assumptions about how their application is performing from static measurements, observability enables them to see a holistic picture of their application behavior, and critically, how a user perceives performance.\n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n## What is observability?\nTo understand the value in observability, it's helpful to first establish an understanding of what monitoring is, as well as what it does and does not provide in terms of information and context.\n\nAt its core, monitoring is presenting the results of measurements of different values and outputs of a given system or software stack. 
Common metrics for measurement are things like CPU usage, RAM usage, and response time or latency. Classic logging systems are similar; a static piece of information about an event that occurred during system operation.\n\nMonitoring provides limited-context measurements that might indicate a larger issue with the system. Aggregation and correlation are possible using traditional monitoring tools, but typically require manual configuration and tuning to provide a holistic view. As the industry has advanced, the concept of what makes for effective monitoring has moved beyond static measurements of things like CPU usage. In its now-famous SRE book, Google emphasizes that you should focus on four key metrics, known as \"[Golden Signals](https://sre.google/sre-book/monitoring-distributed-systems/)\":\n\n- Latency: The time it takes to fulfill a request\n- Traffic: High-level measurement of overall demand\n- Errors: The rate at which requests fail\n- Saturation: Measurement of resource usage as a fraction of the whole; typically focuses on constrained resources\n\nWhile these metrics help home in on a better picture of overall system performance, they still require a non-trivial engineering investment to design, build, integrate, and configure a complete monitoring system. There is considerable effort involved in enumerating failure modes, and manually defining and associating the correct correlations in even simple cases can be time-consuming.\n\nIn contrast, observability offers a much more intuitive and complete picture as a first-class feature: You don’t need to manually correlate disparate monitoring tooling. An aggregated monitoring dashboard is only as good as the last engineer that built it; conversely, an observability platform adapts itself to present critical information in the right context, automatically. 
This can even extend further left into the software development lifecycle (SDLC), with observability tooling providing important performance feedback during CI/CD runs, giving developers operational feedback about their code.\n\nUltimately, observability provides more holistic debugging and understanding. Observability data can show the “unknown unknowns” to better understand production incidents. For more context into \"why\" that's important, the next section highlights an excellent example where monitoring might fall short and where observability fills in the crucial story.\n\n## Why focus on observability?\nFocusing on observability can help drive down mean time to resolution (MTTR), resulting in shorter outages, better application performance, and improved customer experience. While it may seem at first glance that monitoring can provide the same advantages, consider the anecdote that follows.\n\nAn engineering organization gets a ping from the accounting department; the invoice for cloud services is getting expensive, so much so that the CFO has noticed. DevOps engineers have pored over the monitoring system to no avail; every part of the system has consistently reported being in the green for things like memory, CPU, and disk I/O. As it turns out, the root cause was another \"unknown unknown\" event: DNS latency in the CI/CD pipelines was causing builds to fail at an elevated rate. Builds needing more retries consumed a great number of cloud resources. However, this effect never persisted long enough to reflect in the monitoring system. By adding observability tooling and collecting all event types in the environment, ops was able to zero in on the source of the problem and remediate it. In a traditional monitoring system, the organization would have had to have known about the DNS latency problem a priori.\n\nObservability is also important for non-technical stakeholders and business units. 
As technology becomes more intertwined with the primary profit silo, software infrastructure KPIs become business KPIs. Observability can provide better insight into KPI performance, as well as self-service options for different teams.\n\nModern software and applications depend heavily on providing good user experience (UX). As the previous story illustrates, monitoring static metrics won't always tell the complete story about UX or system performance. There might be serious issues lurking behind seemingly healthy metric dashboards.\n\n## Key observability metrics\nFor organizations that have decided to implement observability tooling, the next step is to identify the core goals of observability, and how that can best be implemented across their stack.\n\nAn excellent place to start is with the three fundamental pillars of observability:\n- Logs: Information and Events\n- Metrics: Measurements of specific metrics and performance data\n- Tracing: Logging end-to-end request performance during runtime\n\nAlthough this can seem overwhelming, projects like [OpenTelemetry](https://opentelemetry.io/) are helping to drive broad standards acceptance for logging, metrics, and tracing, enabling a more consistent ecosystem and a shorter time-to-value for organizations that implement observability with tooling built on OpenTelemetry standards.\n\nAdditional observability data and pillars include\n- Error tracking: more granular logs with aggregation\n- Continuous Profiling: evaluating granular code performance\n- Real User Monitoring (RUM): Understand application performance from the perspective of an actual user\n\nLooking at these pillars, a central theme starts to emerge; it's no longer enough to look at a small slice of time and space in modern distributed systems, a holistic, 10,000-foot view is needed. 
Understanding application performance starts with sampling it as an actual customer experiences it, and then further monitoring the complete performance and behavior of their interaction with your software.\n\nBeyond traditional application monitoring, observability can help improve the operational excellence posture for any engineering organization. Well-crafted alerts and incident management programs are usually born out of hard lessons from real outages. Implementing [chaos engineering](https://principlesofchaos.org/) can test observability platforms during real failures, albeit in a controlled environment with known outcomes. Introducing chaos engineering into systems where \"unknown unknowns\" might hide, not just in your production workloads but your CI/CD pipelines, supply chain, and DNS can yield significant gains in operational footing.\n\n## Observability is a critical part of DevOps\nNot only is observability critical for DevOps, but also for the entire organization. Replacing the static data of legacy monitoring solutions, [observability](/direction/monitor/platform-insights/) provides a full-spectrum view of application infrastructure.\n\nDevOps teams should be working with stakeholders to share observability metrics in a way that benefits the entire organization, as well as take steps to improve the implementation. Learning, and then evangelizing the benefits of app instrumentation to development teams can make observability even more effective. DevOps teams can also help identify the root cause of production incidents faster; well-instrumented application code makes it easy to distinguish from infrastructure issues. 
Finally, shifting observability left along the CI/CD pipeline means potential service-level objective (SLO) deltas are caught before they reach production.\n\nDevOps teams looking to provide meaningful improvements to application performance and business outcomes can look to observability as a way to deliver both.\n\n**Watch now: Senior Developer Evangelist Michael Friedrich digs deeper into the shift from monitoring to observability:**\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/BkREMg8adaI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[9,875,1040],{"slug":6695,"featured":6,"template":686},"observability-vs-monitoring-in-devops","content:en-us:blog:observability-vs-monitoring-in-devops.yml","Observability Vs Monitoring In Devops","en-us/blog/observability-vs-monitoring-in-devops.yml","en-us/blog/observability-vs-monitoring-in-devops",{"_path":6701,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6702,"content":6708,"config":6714,"_id":6716,"_type":14,"title":6717,"_source":16,"_file":6718,"_stem":6719,"_extension":19},"/en-us/blog/observations-on-how-to-iterate-faster",{"title":6703,"description":6704,"ogTitle":6703,"ogDescription":6704,"noIndex":6,"ogImage":6705,"ogUrl":6706,"ogSiteName":670,"ogType":671,"canonicalUrls":6706,"schema":6707},"Why iteration helps increase the merge request rate","How the Monitor:Health team has been able to increase the merge request rate using better iteration, a bias for action, and by writing things down.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666603/Blog/Hero%20Images/book.jpg","https://about.gitlab.com/blog/observations-on-how-to-iterate-faster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why iteration helps increase the merge request rate\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2020-05-06\",\n      }",{"title":6703,"description":6704,"authors":6709,"heroImage":6705,"date":6711,"body":6712,"category":791,"tags":6713},[6710],"David O'Regan","2020-05-06","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-05-21.\n{: .alert .alert-info .note}\n\nDo you know much about fighter jets? It's okay if you don't, neither did I until I became a software developer. While it seems like a rather strange set of things to see a correlation with, they are intrinsically related through a man named [John Boyd](https://en.wikipedia.org/wiki/John_Boyd_(military_strategist)) who was a military strategist and a fighter pilot.\n\nBoyd was rather famous in the Air Force for a law he coined, which we're going to use to demonstrate the difference between iterative and recursive approaches to software development, why we favor it in the [Monitor:Health team](/handbook/engineering/development/ops/monitor/respond/) and why you might want to favor it too.\n\n_Boyd's Law of Iteration states that **speed** of iteration beats quality of iteration_\n\nThis law was developed by Boyd while observing dogfights between MiG-15s and F-86s. Even though the MiG-15 was considered a superior aircraft by aircraft designers, the F-86 was favored by pilots. The reason it was favored was simple: in one-on-one dogfights with MiG-15s, the F-86 won nine times out of ten.\n\nWhat's happening here? If the MiG is the better aircraft, why would the F-86 win the majority of the fights? Well according to Boyd who was one of the best dog-fighters in history suggested:\n\n> That the primary determinant to winning dogfights was observing, orienting, planning, and acting **faster** not better.\n\nThis leads to Boyd's Law of Iteration: Speed of iteration beats quality of iteration. 
What's pretty incredible is that you will find this same scheme throughout every section of modern software development:\n\n- Writing unit tests? Keep them small and lean so they can be run faster.\n- Writing usability tests? They work best when they're lean and you can quickly discard what's not working.\n- Writing a function, class, or feature? Start with the smallest, [most boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions) and iterate.\n- Doing an Agile approach? The quicker the better you'll often find.\n- Software in general is about failing early and often.\n\nSo lets pretend I've convinced you with some obscure fighter jet references and now you're ready to break down those merge requests and iterate quicker than you've ever iterated. Awesome! Let's talk about how to foster a team environment that allows for iteration, because that's the key here at GitLab. When you get started on this pilgrimage to [11 amazing merge requests per month as a goal](/handbook/engineering/development/performance-indicators/#mr-rate) you need to keep one very important thing in mind:\n\nIt's a team effort. While you as an individual developer will do an amazing job by hammering in on this skill, the real difference is made when you look at iteration as a tool to lift the team up. Think of yourself as the pilot that wants to get that faster iteration in to cover your buddies.\n\n## Bias for action\n\nWhen I got started at GitLab I was introduced to the idea of really believing in iteration as a methodology because it's a [company value](https://handbook.gitlab.com/handbook/values/#iteration).\n\n> Decisions should be thoughtful, but delivering fast results requires the fearless acceptance of occasionally making mistakes.\n\nThis was highlighted in various ways by different people across the company, but something that really stuck out to me was hearing another team member refer to the Monitor:Health team as a \"team with a strong bias for action\". 
We don't really believe in being reactive, instead we want to always be proactively improving the product. This underlying belief system trickles down from our team leader into every discussion, decision, deliverable set, and ultimately, how we as developers see our own agency operating. We **believe** in action, that an open merge request (even if it's not perfect) is always better than nothing.\n\nAs we mentioned, we have a bias for action. So, when our team anticipates a problem, we create a merge request first before starting a discussion. I know for a lot of people this might seem a bit counterproductive – what if this is a wasted effort? When in reality, [starting at a merge request](/handbook/communication/#start-with-a-merge-request) is the best possible place for any real discussion. It helps create a living log for the conversation, and creates more visibility for the problem we are fixing.\n\n## All code is bad code: Impostor syndrome, course correction, and accepting failure\n\nI had a mentor at my old company who was a fantastic programmer, and many of the people on my team looked up to him. One Friday afternoon, he gave a presentation that really shaped my understanding of iteration. This talk,  \"All code is bad code\" became rather famous in our small team because he mostly spoke about why the majority of the code he had written himself was ultimately bad code, and how the desire to **appear** smart is the number one barrier for people to become great software developers.\n\n> What you make with your code is how you express yourself, not the code itself - Eric Elliott\n\nProgramming is by its very nature difficult. As humans we're not particularly well-suited for deep and abstract logical thinking – our brains simply don't work like that by default and it's a learned skill for the most part. Being reminded of this is a humbling but freeing experience as it helps you move forward without fear. 
Every merge request you submit should be high quality but your definition of high quality should shift to mean delivering something useful to an end user.\n\nAt GitLab, we accept our limitations in that we might not know everything about the problem we're trying to solve. Instead, we lean heavily into the idea of the smallest, most [boring solutions](https://handbook.gitlab.com/handbook/values/#boring-solutions) that can be expanded upon quickly by collaborating with our team.\n\n> Our bias for action also allows us to course correct quickly.\n\nWe always accept there will be [uncertainty](https://handbook.gitlab.com/handbook/values/#accepting-uncertainty) in what we do as software developers but we don't let that stop us from trying to deliver an amazing product to our users.\n\nWhen we create a merge request, we do so with a [low sense of shame](https://handbook.gitlab.com/handbook/values/#low-level-of-shame) and [no ego](https://handbook.gitlab.com/handbook/values/#no-ego). This approach allows us to deliver fearlessly **even if we're wrong**.\n\nAs a team, this is the environment you want to foster because it helps create a wonderfully positive feedback loop: Low sense of shame > many merge requests submitted > more discussion > many iterations > ideally, the best possible collaborative results for the end user.\n\nThe core takeaway for team leaders is that **it's okay to make mistakes**. The best thing you can do as a team leader is to foster a safe place for developers to make mistakes and learn as they go.\n\nIf you're a developer, remember that **it's okay to make mistakes as long as you strive for course correction**.\n\n## Foster a healthy sense for urgency for writing things down\n\n> \"While you're thinking about doing it... just do it.\"\n\nOne of the things we do so well at GitLab in general is writing things down. 
Documenting as we go is how we help our team pick up and go without needing to waste time on unnecessary communication.\n\nIt's safe to say that with our GitLab handbook being at [2,500,000 words](/handbook/about/#count-handbook-pages) and counting, the folks here take writing things down pretty seriously.\n\nAt GitLab, we believe this is also the path to a higher merge request rate.\n\nOn the Monitor:Health team and throughout GitLab, we believe in preserving our energy, capturing valuable conversations, and making them public to dispense this knowledge widely. As a new team member, I've seen this in action multiple times now. Over the course of my eight weeks at GitLab, I can count on one hand the number of times I've had to ping a team member with a question I could not find an answer to in our documentation. The discipline for keeping these notes really keeps the focus on delivering results since we don't have an excess of energy spent going back and forth with questions.\n\nIn my first four weeks at GitLab almost every single question I needed an answer to was already covered in the documentation someone else had already gone to the trouble of creating. 
Here is a list of some of my initial questions and links to the answers in GitLab documentation.\n\n- [How do I set up the local GitLab Development Kit?](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/master/doc/howto/auto_devops/tips_and_troubleshooting.md)\n- [How do I set up the GitLab Development Kit with Prometheus?](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/master/doc/howto/prometheus.md)\n- [How do I use embedded charts via Prometheus and Grafana?](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html#embedding-gitlab-managed-kubernetes-metrics)\n- [How do I use the `@gitlab/ui` components?](https://gitlab.com/gitlab-org/gitlab-ui/-/blob/master/README.md)\n- [How do I handle styling in external projects?](https://gitlab.com/gitlab-org/gitlab-ui/-/blob/master/doc/css.md)\n- [How should components look and act on pages I am developing?](https://design.gitlab.com/)\n\nIf you can encourage your team to document solutions as problems arise, it can help developers deliver more.\n\n> Documentation is a love letter that you write to your future self. - Damian Conway\n\n## Tighten those feedback loops\n\n> Keep what works, disregard what doesn't.\n\nYou'll often notice that the feedback loop for tight-knit teams just gets tighter over time. People start to see patterns of what does and doesn't work as they work together over time. A good team should aim to address these patterns by keeping the ones that work and refining them but also by not being afraid to disregard the ones that don't work.\n\nRecently, the Monitor:Health team [delivered the first iteration of an incident management tool called the Status Page](https://about.gitlab.com/blog/how-we-built-status-page-mvc/). 
The team did an amazing job on the  [Status Page](https://gitlab.com/gitlab-org/status-page), with each team member really aiming to break problems into their smallest pieces and iterate quickly, which kept the overall merge request rate high for this project.\n\nThe post mortem of the development process is what made the biggest difference. We came together as a team to discuss what aspects worked well and which aspects didn't with the end goal being to tighten our feedback loops so people can really work autonomously and asynchronously. It takes a lot of bravery to have a critical discussion about what didn't work publicly, and not just focus on all the things you have done well.\n\nHow does this play out? Well for us on the Monitor:Health team, it means getting better at refining issues to ensure that when they receive a `ready for development` label they are **truly** ready for anyone to pick up at any time and take it all the way to done. This really helps increase the overall merge request rate because developers don't need to sit through one to three feedback loops waiting for their questions to be answered, when they could be getting it done.\n\nFor an issue to have a [`ready for development` label](/handbook/product-development-flow/#build-phase-2-develop--test) it needs to have:\n\n- A clear definition of \"done\"\n- All the necessary conversations are already resolved inside the issue\n- Developer defines a clear set of expectations\n- Say whether tests are required\n- Say whether UX is needed\n\nWe are trying to enable **any** developer on the Monitor:Health team to read an issue with zero preexisting context and deliver a merge request related to the issue without needing to leave that issue. Remember, we're trying to [measure results not hours](https://handbook.gitlab.com/handbook/values/#measure-results-not-hours). The less time someone spends asking questions, the more time they can spend delivering results.\n\n> Hail to the issue, baby! 
- Duke Nukem if he was a software developer at GitLab\n\n## It's all about the team\n\nThe only reason we are able to create this level of velocity inside GitLab is because of the belief that we can and **should** iterate quickly. By having the support of the team across the main points in how to iterate, i.e., bias for action, low sense of shame, a healthy sense of urgency, and tight feedback loops is the bedrock that allows us to deliver results for customers via a better product.\n\nWell, that's all folks! I hope you enjoyed the read and learned something along the way. If you have any questions or want to suggest an improvement, drop me an email at: `doregan@gitlab.com`.\n\nWhen in doubt, iterate faster.\n\n## TL;DR, show me the proof\n\n![Results](https://about.gitlab.com/images/blogimages/iterate-faster/results.png){: .center}\n\nThe Monitor:Health frontend team has grown over time while increasing average merge request rate. The team's merge request rate reflects the current team size of four people.\n\n## Learn more\n\n- [GitLab Values](https://handbook.gitlab.com/handbook/values/)\n- [Boyds Law](https://blog.codinghorror.com/boyds-law-of-iteration/)\n- [All code is bad](https://www.stilldrinking.org/programming-sucks)\n- [Accepting failure](https://www.youtube.com/watch?v=UxvXgmZf6NU)\n\n[We're hiring](/jobs/) at GitLab, or consider [trying us out](/free-trial/) for free.\n\nCover image by [Aaron Burden](https://unsplash.com/photos/G6G93jtU1vE) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[728,9],{"slug":6715,"featured":6,"template":686},"observations-on-how-to-iterate-faster","content:en-us:blog:observations-on-how-to-iterate-faster.yml","Observations On How To Iterate 
Faster","en-us/blog/observations-on-how-to-iterate-faster.yml","en-us/blog/observations-on-how-to-iterate-faster",{"_path":6721,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6722,"content":6728,"config":6732,"_id":6734,"_type":14,"title":6735,"_source":16,"_file":6736,"_stem":6737,"_extension":19},"/en-us/blog/one-devops-platform-can-help-you-achieve-devsecops",{"title":6723,"description":6724,"ogTitle":6723,"ogDescription":6724,"noIndex":6,"ogImage":6725,"ogUrl":6726,"ogSiteName":670,"ogType":671,"canonicalUrls":6726,"schema":6727},"One DevOps platform can help you achieve DevSecOps","GitLab drives innovation in the AST market to secure cloud-native applications.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679348/Blog/Hero%20Images/locks.jpg","https://about.gitlab.com/blog/one-devops-platform-can-help-you-achieve-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"One DevOps platform can help you achieve DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-05-09\",\n      }",{"title":6723,"description":6724,"authors":6729,"heroImage":6725,"date":2783,"body":6730,"category":875,"tags":6731},[1454],"\n\nApplication security testing (AST) is a fast-moving and important area for software development. DevOps methodologies have spurred the need to integrate testing within the developer’s workflow. GitLab believes the more ingrained AST is in the software factory, the more secure applications will be and the easier it will be for companies to meet compliance demands. 
We believe our [strategic platform approach](/why-gitlab), where security and compliance are embedded in DevOps from planning to production, provides efficiency and value unmatched by traditional application security vendors.\n\nGartner® has named GitLab a Challenger in the [2022 Gartner Magic Quadrant™ for Application Security Testing](https://page.gitlab.com/resources-report-gartner-magic-quadrant-ast.html). According to Gartner, “a major driver for the evolution of the AST market is the need to support enterprise [DevSecOps](/topics/devsecops/) and cloud-native application initiatives.”\n\n“We are excited to see continued momentum for our unique approach that embeds security into the DevOps workflow,” says Hillary Benson, GitLab director of product management. This is the third year that GitLab has been recognized in the Gartner Magic Quadrant for Application Security Testing. “We believe that our recognition as a Challenger in the Magic Quadrant represents an evolving market understanding of the value of an approach that empowers and enables developers to find and fix vulnerabilities – and the simplicity of leveraging a DevOps platform to do so.”\n\n> **You can read more about the results and download a copy of the report by visiting [our commentary page](/analysts/gartner-ast22/).**\n\n\nGitLab’s complete DevOps platform approach provides automation needed by DevOps, along with policy and vulnerability management needed by security professionals. GitLab’s Ultimate tier provides an integrated, vetted, and managed set of scanners to meet the security and compliance needs of modern-day application development and [cloud-native](/topics/cloud-native/) environments. \n\n## A unique approach to AST\n\nWe continue to innovate in the application security space. Let’s look at how we’re different from many of the more traditional stand-alone AST technologies. It’s these very differences that provide benefits achievable by using a single platform for DevOps and security. 
For example: \n\nWe build comprehensive scans into the CI pipeline to enable a more interactive testing environment. This is a unique approach as others in the category focus their offering on instrumentation-based interactive AST. With GitLab, the developer gets a more complete view of the security flaws as they are created – when they are most efficiently resolved.\n\nSimilarly, while analysts place emphasis on lightweight spell-check-like SAST features, we have found that these features are less important to GitLab users, again because of our built-in approach. A metaphor may be helpful to explain. We are all accustomed to saving documents frequently so edits are not lost. Developers do the same while editing software. Changes made are “committed” frequently to the code repository. Upon hitting the ‘commit’ button, GitLab performs a true, [SAST scan](/direction/secure/static-analysis/sast/) on code changes, which gives developers instant and more complete feedback. And DevOps teams can choose to enable  [DAST scanning](https://docs.gitlab.com/ee/user/application_security/dast/) that uses GitLab’s review app feature to assess changes pre-merge. Similarly,  [dependencies](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), containers, infrastructure as code, and more can all be scanned, at the push of the commit button.\n\nIn addition, GitLab also is keen on providing DevOps teams just-in-time education about vulnerabilities and fixes. Now, via partnerships with [Kontra](/blog/kontra-and-gitlab-integrate-vulnerability-education-into-the-devops-workflow/) and [Secure Code Warrior](/blog/heres-how-to-get-integrated-secure-coding-advice-in-gitlab/), GitLab provides developers with crisp training on how to mitigate the specific vulnerability they just created. 
This helps developers learn proper coding techniques instead of flagging the problem to figure out later.\n\n## Concentrating on compliance\n\nShifting compliance left and embedding it deep into the software development lifecycle, a.k.a. [continuous software compliance](/solutions/compliance/), is also a priority for GitLab.\n\n“We enable organizations to create policies that align with their compliance regulations and enforce them throughout the application development workflow,” Benson says. “Rather than juggling multiple policy enforcement applications, you have a single lens for visibility across the entire lifecycle.” For instance, a company can develop granular compliance pipeline policies that require a SAST to run for every commit in a certain project or a chain of MR approvals that developers can’t circumvent. “Those types of common controls and separation of duties simplify software audits and speed up application deployments.”\n\nGitLab is honored to be recognized in the Gartner Magic Quadrant, and will continue to empower and unite developers and security professionals alike using repeatable, defensible processes that automate security and compliance policies from development through production.\n\n> **[Start a free Ultimate trial](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial)**\n \n_Gartner, “Magic Quadrant for Application Security Testing,” Dale Gardner, Mark Horvath, Dionisio Zumerle, April 18, 2022. Gartner does not endorse any vendor, product or service depicted in our research publications, and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner's research organization and should not be construed as statements of fact. 
Gartner disclaims all warranties, expressed or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose. GARTNER and Magic Quadrant are registered trademarks and service marks of Gartner, Inc. and/or its affiliates in the U.S. and internationally and are used herein with permission. All rights reserved._\n\nCover image by [Fly:D](https://unsplash.com/photos/ZNOxwCEj5mw) on Unsplash\n{: .note}\n",[9,2243,2981,875,1158],{"slug":6733,"featured":6,"template":686},"one-devops-platform-can-help-you-achieve-devsecops","content:en-us:blog:one-devops-platform-can-help-you-achieve-devsecops.yml","One Devops Platform Can Help You Achieve Devsecops","en-us/blog/one-devops-platform-can-help-you-achieve-devsecops.yml","en-us/blog/one-devops-platform-can-help-you-achieve-devsecops",{"_path":6739,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6740,"content":6746,"config":6751,"_id":6753,"_type":14,"title":6754,"_source":16,"_file":6755,"_stem":6756,"_extension":19},"/en-us/blog/open-core-is-worse-than-plugins",{"title":6741,"description":6742,"ogTitle":6741,"ogDescription":6742,"noIndex":6,"ogImage":6743,"ogUrl":6744,"ogSiteName":670,"ogType":671,"canonicalUrls":6744,"schema":6745},"Open core is worse than plugins... and that’s why it’s better","Learn why GitLab's decision to opt for the \"worse\" choice has been a great success.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681581/Blog/Hero%20Images/gitlab-linux-ibm-z-redhat-openshift.jpg","https://about.gitlab.com/blog/open-core-is-worse-than-plugins","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Open core is worse than plugins... 
and that’s why it’s better\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2022-07-14\",\n      }",{"title":6741,"description":6742,"authors":6747,"heroImage":6743,"date":6748,"body":6749,"category":679,"tags":6750},[1609],"2022-07-14","\nOpen core is obviously a horrible approach to creating a product with an ecosystem of extensions and integrations: There are no proper protocols and interfaces. Instead, anyone can just add their integration to the code base and even adjust said code base to their needs if it doesn’t fit.\n\nSo why have we been using the “Worse” approach at GitLab for many years now, with great success? Because [Worse is Better](https://www.dreamsongs.com/RiseOfWorseIsBetter.html) (a term conceived by [Richard P. Gabriel](https://en.wikipedia.org/wiki/Richard_P._Gabriel)). Of course, it turns out that “Worse” is actually even better than Worse is Better suggested.\n\nGabriel’s [original argument](https://www.dreamsongs.com/RiseOfWorseIsBetter.html) was that (slightly) intrinsically worse but simpler and easier to implement software has better survival characteristics than better-designed, more complex software, and thus will consistently win in the marketplace.\n\nAt GitLab, we have found that this is basically true, which is why we, for example, favor “boring technology,” even if it might not be the best possible solution for a given scenario. But this doesn’t tell the whole story: It turns out that such software is not just more successful, it also ends up being qualitatively better in the end.\n\n## Worse is even better\n\nIt is important to note that Gabriel’s original argument was not that **bad** software wins out. In fact, both his “worse” and his “better” have the same qualities:\n\n1. Simplicity, of interface and implementation\n2. Correctness\n3. Consistency\n4. 
Completeness\n\nHowever, his “worse” and his “better” have slightly different weights for the value placed on these characteristics, with the (worse) New Jersey school favoring simplicity of implementation over simplicity of interface, whereas the (better) “MIT” school favors simplicity of interface, even at the cost of a more complex implementation.\n\nIf a simple interface can be achieved with a simple implementation, both schools agree, the difference comes when there are tradeoffs to be made.\n\nWhat makes worse even better, and what Gabriel didn’t take into account even in later [versions](https://www.dreamsongs.com/WorseIsBetter.html), is the tremendous value of feedback loops. Being early doesn’t just let the New Jersey approach win in the marketplace, it also allows it to collect feedback much, much earlier and much more quickly than the MIT approach.\n\nPaul MacCready won the first [Kremer prize](https://en.wikipedia.org/wiki/Kremer_prize) not by initially setting out to build the best human-powered aircraft, but by building the one that was easiest to repair in order to gather feedback more quickly. While other teams took a year or more to recover from a crash, his plane sometimes flew again the same day. And so it was exactly this willingness to lose sight of the prize that resulted in him winning it.\n\nIn much the same way, it is these quick feedback loops that a “worse” approach enables, started much earlier, that eventually lead to a better product.\n\n## The problem with plugins\n\nAt least since the success of Photoshop, a proper plugin interface has been recognized as _The Right Way_ to make software both more compelling for users and less easy to leave behind by creating a third-party ecosystem that provides useful functionality without the vendor having to provide all of that functionality themselves.\n\nIt was so successful that systems like OpenDoc took the idea further to be just a set of plugins, with no real hosting application. 
None of these systems succeeded in the marketplace.\n\nOne of the reasons is that good plugin interfaces are not just hard, but downright fiendishly difficult to develop. The basic difficulty is that it is hard to get the balance right: what to expose, what to keep hidden, how to provide functionality. But that’s not the fiendish part.\n\nThe fiendishly difficult part of plugin API development is that the very things you need to do to handle the difficulties make the task even harder: You need to design more carefully, you need to make interfaces stable, you can only iterate them slowly.\n\nIn short: You face a chicken-and-egg problem of premature abstraction. In order to make a good plugin API, you need to see it being used, but in order to see how it is being used, you need to first have it. This dynamic delays initial availability and makes feedback cycles slower.\n\nSoftware is not the only domain facing this problem. Parks, for example, often have official paths that don’t match where people actually want to go. One group of landscape architects solved this by doing less: They didn’t put in any walkways in a park they had created. Instead, they waited for trails to materialize as people walked where they needed to walk. Only after those trails had materialized did they pave them, making them official.\n\nLast but not least, a plugin interface means that the final product the user sees, consisting of both the core application and all the plugins, is not as well-integrated as it could be. 
The value proposition of “here is a box with tools, have fun!” sounds a lot more enticing to developers than it does to end users, even when those tools are, by themselves, best of breed.\n\n## Open core\n\nOpen core, on the other hand, sounds like exactly the wrong approach, certainly from a software engineering point of view, as there are no defined black-box boundaries, but also from a business point of view as there doesn’t seem to be an actual mutually reinforcing ecosystem.\n\nHowever, the open core approach is great for end users, both for adopters who just want to use it and also adapters who need to tailor the system to their use case. And in the end, it is the end users that count.\n\nFor adapters, the system is immediately hackable. There is no need to wait for the vendor to provide a plugin interface in the first place, and no need to wait more for the vendor to make that plugin interface provide the functionality needed for a particular application some time in the future, if ever. Even if changes to the core application are required, this is at least possible.\n\nSince there is more adaptation activity happening sooner, the system becomes better at accommodating adaptation needs, and a virtuous cycle ensues.\n\nFor adopters, the benefits are multifold: First, the system gets more functionality more quickly, which is always good. Almost more importantly, this functionality is integrated by the vendor and provided as an integrated whole. There is a reason single-vendor office suites succeeded where OpenDoc’s toolbox approach failed.\n\nThat said, an open core approach does require solid engineering, a good architectural base, and ongoing vigilance. As [explained earlier](https://thenewstack.io/why-were-sticking-with-ruby-on-rails-at-gitlab/), we believe that Ruby on Rails provided us with a good starting point to build GitLab as a solid modular monolith, both approachable and well-structured. 
With that as a starting point, good design is encouraged by example, rather than being enforced by strict API boundary. Enforcement, on the other hand, comes in a more human form as pull requests are considered, shaped, and approved or rejected.\n\nSo boundaries still exist, but instead of being brick walls to crash against, they are low fences that are noticeably present, but can be stepped over if needed.\n\nAnd although these low fences are considered “worse” than the brick walls we are used to, they actually lead to better outcomes for everybody involved.\n",[9,682,267],{"slug":6752,"featured":6,"template":686},"open-core-is-worse-than-plugins","content:en-us:blog:open-core-is-worse-than-plugins.yml","Open Core Is Worse Than Plugins","en-us/blog/open-core-is-worse-than-plugins.yml","en-us/blog/open-core-is-worse-than-plugins",{"_path":6758,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6759,"content":6764,"config":6769,"_id":6771,"_type":14,"title":6772,"_source":16,"_file":6773,"_stem":6774,"_extension":19},"/en-us/blog/optimizing-devops-visibility-in-gitlab-14",{"title":6760,"description":6761,"ogTitle":6760,"ogDescription":6761,"noIndex":6,"ogImage":5897,"ogUrl":6762,"ogSiteName":670,"ogType":671,"canonicalUrls":6762,"schema":6763},"Optimize DevOps with enhanced visibility tools in GitLab 14","How GitLab 14's end-to-end visibility and actionability can help users understand and improve delivery and alignment.","https://about.gitlab.com/blog/optimizing-devops-visibility-in-gitlab-14","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Optimize DevOps with enhanced visibility tools in GitLab 14\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cormac Foster\"}],\n        \"datePublished\": \"2021-07-21\",\n      }",{"title":6760,"description":6761,"authors":6765,"heroImage":5897,"date":6766,"body":6767,"category":679,"tags":6768},[1134],"2021-07-21","\n[DevOps makes 
teams and work more efficient](/topics/devops/how-and-why-to-create-devops-platform-team/), more consistent, and more productive – but how much more?\n\nOn its surface, the answer is simple. We need to measure workflow from idea to delivery, identify and remove blockers, and benchmark improvements in a manner that is consistent and replicable. The challenge is the way we've typically built the systems that hold the data we're trying to understand.\n\nEnhanced visibility tools are essential to measuring and optimizing modern DevOps processes, and mapping the work output to ensure the business outcomes that matter are achieved.\n\n## The failure of DIY DevOps\n\nMost businesses operate and maintain a multi-product \"DIY DevOps\" toolchain, but stitched-together applications with bespoke integrations don't lend themselves to visibility. Each component in the toolchain captures a unique set of data, with distinct formatting and metadata, logged to a siloed data store. Extracting, correlating, and displaying that data is a labor intensive chore – assuming the various APIs allow proper access at all. Poor visibility can lead to slow and imprecise decision-making and misalignment between teams, but building and maintaining visibility in DIY toolchain saps resources from your business, adding work instead of removing it.\n\n## A platform for visibility\n\nAt GitLab, we believe that stumbling in the dark and maintaining complex toolchains are not viable business strategies. We all deserve better, and [GitLab 14](/gitlab-14/) is the [DevOps platform](/topics/devops-platform/) that provides enhanced visibility without added work. 
As a complete DevOps platform, GitLab 14 is uniquely capable of delivering visibility into DevOps processes, surfacing out-of-the-box insights from across the product delivery lifecycle and helps users understand what works, what doesn't, and how to make improvements.\n\n## Metrics that matter\n\n![Lead Time for Changes helps you understand your team's velocity, agility, and efficiency, from the first code commit to production.](https://about.gitlab.com/images/blogimages/lead_time.png){: .shadow}\nLead Time for Changes helps you understand your team's velocity, agility, and efficiency.\n{: .note.text-center}\n\nGitLab 14 delivers operational metrics to help users understand DevOps maturity and benchmark progress. The DevOps Research and Assessment (DORA) firm demonstrated how DevOps maturity leads to positive business outcomes like happier customers, greater market share, and increased revenue. They've outlined [four key metrics](https://cloud.google.com/blog/products/devops-sre/using-the-four-keys-to-measure-your-devops-performance) that are highly correlated with business performance, and GitLab 14 surfaces two of the four. [Deployment Frequency](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html#deployment-frequency-charts) charts help monitor the efficiency of deployments over time, find bottlenecks, and understand when and how to improve deployment process. 
[Lead Time for Changes](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html#lead-time-charts) helps users understand their team's velocity, agility, and efficiency – from the first code commit all the way through production.\n\n## Actionable insights\n\n![Value Stream Analytics lets you zero in on value blockers and immediately remediate them.](https://about.gitlab.com/images/blogimages/value_stream_analytics.png){: .shadow}\nValue Stream Analytics lets you zero in on value blockers and immediately remediate them.\n{: .note.text-center}\n\nAfter identifying opportunities for change, you should be able to take action right away with GitLab 14. Our [customizable Value Stream Analytics](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html) tools allow teams to monitor specific workflows tailored to their particular needs and identify high-priority blockers to delivering value to customers.\n\nUnlike products that focus exclusively on visibility and discovery, GitLab 14 makes these insights actionable. With one click, users can move from identifying a merge request stuck in code review or an issue waiting for approval to solving the problem. Actionable insights remove wasteful loops of questions and clarifications, and allow all users to focus on productive work.\n\n## See for yourself\n\nWant to learn more? Learn how GitLab customers like [Crédit Agricole](/customers/credit-agricole/), [Hotjar](/customers/hotjar/), and [others](/customers/) are turning visibility and insights into business value, or take the next step and [try GitLab Ultimate for free](/free-trial/)!\n\nThis blog is part two in a three-part series on some of the top features of GitLab 14. Learn more about how GitLab 14 includes some of the [top Security features in part one](/blog/are-you-ready-for-the-newest-era-of-devsecops/). 
\n",[9,855],{"slug":6770,"featured":6,"template":686},"optimizing-devops-visibility-in-gitlab-14","content:en-us:blog:optimizing-devops-visibility-in-gitlab-14.yml","Optimizing Devops Visibility In Gitlab 14","en-us/blog/optimizing-devops-visibility-in-gitlab-14.yml","en-us/blog/optimizing-devops-visibility-in-gitlab-14",{"_path":6776,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6777,"content":6783,"config":6788,"_id":6790,"_type":14,"title":6791,"_source":16,"_file":6792,"_stem":6793,"_extension":19},"/en-us/blog/pipeline-editor-overview",{"title":6778,"description":6779,"ogTitle":6778,"ogDescription":6779,"noIndex":6,"ogImage":6780,"ogUrl":6781,"ogSiteName":670,"ogType":671,"canonicalUrls":6781,"schema":6782},"Meet Pipeline Editor, your one-stop shop for building a CI/CD pipeline","The Pipeline Editor reduces the complexity of configuring your CI/CD pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665961/Blog/Hero%20Images/image_cover.jpg","https://about.gitlab.com/blog/pipeline-editor-overview","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet Pipeline Editor, your one-stop shop for building a CI/CD pipeline\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-02-22\",\n      }",{"title":6778,"description":6779,"authors":6784,"heroImage":6780,"date":6785,"body":6786,"category":791,"tags":6787},[2022],"2021-02-22","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-03-02.\n{: .note .alert-info .text-center}\n\nIn GitLab 13.8, we introduced the first iteration of the [Pipeline Editor](/releases/2021/01/22/gitlab-13-8-released/): a dedicated editor designed for authoring your CI/CD. 
It is your one-stop shop for everything you need to configure your CI/CD pipelines.\n\n## Why do we need a dedicated editor for pipelines?\n\nGitLab's advanced syntax provides a high degree of customization for sophisticated and demanding CI/CD use cases. However, all of this power and flexibility comes with a fair bit of complexity. The Pipeline Editor helps you mitigate this challenge and serves as a single solution that groups all existing CI authoring features in a single location. It is our foundation, and we plan to build on it with enhancements in future iterations. \n\n## Getting started\n\nIn order for the pipeline editor to work, you'll first need to create a `.gitlab-ci.yml` file in your project. The `.gitlab-ci.yml` is a [YAML file](https://en.wikipedia.org/wiki/YAML) where you configure specific GitLab CI/CD instructions. Check out how we are working on [improving the first-time experience of creating a `.gilab-ci.yml` file directly from the Pipeline Editor](https://gitlab.com/groups/gitlab-org/-/epics/5276). \n\n### Continuous validation\nOnce you have created the `.gitlab-ci.yml` file and navigated to it in the Pipeline Editor, you can begin editing your configuration. Writing YAML can be error prone. No matter how technical or skilled you are, programming mistakes happen. Sometimes an indentation will be missed, the incorrect syntax is used, or the wrong keyword is selected, and that's OK! As you start authoring your pipeline, GitLab will inspect the pipeline configuration using our linting APIs and provide you with an indicator of whether your pipeline configuration is valid or not. We will continuously validate your pipeline without making any changes to your pipeline configuration, so you can have confidence in hitting \"merge\" and running your pipeline without any surprises. 
\n\n![Continuous validation of pipelines](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image1.png){: .shadow.medium.center}\nContinuous validation of your pipelines\n{: .note.text-center}\n\n### Pipeline visualizer: Seeing is believing\nIt's practically impossible to envision what a pipeline should look like when you start writing from a blank YAML file. Luckily, GitLab provides you with a full pipeline view for every running pipeline. But, what if you want to visualize your pipeline _before_ they begin to run? Well, you can do that now by navigating to the \"Visualize\" tab in the Pipeline Editor. You'll find an illustration that shows how your pipeline should look as you write it, similar to the linter, and GitLab will display the visual before making any commits, before running, or before altering your pipeline in any way.\n\nIn the visualization, we will group all your defined pipeline jobs by stages and add links between the jobs based on the [needs](https://docs.gitlab.com/ee/ci/yaml/#needs) relationships you've configured.\n\nIf we take a look at the example below, you can easily see that I've configured a three-stage pipeline, where the build stage has three jobs (step 1-3), and that step 4 needs steps 1 and 3.\n\n![Pipeline editor overview](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image2.png){: .shadow.medium.center}\nPipeline visualizer\n{: .note.text-center}\n\nHere is what the YAML looks like:\n\n ```yaml\nimage: alpine:latest\n\nstages:\n   - test\n   - build\n   - deploy\n\nprepare:\n   script: exit 0\n   stage: test\n\nstep1:\n   script: echo testo\n   stage: build\nstep2:\n   script: echo testo\n   stage: build\nstep3:\n   script: echo testo\n   stage: build\n\nstep4:\n   needs: ['step1', 'step3']\n   script: exit 0\n   stage: deploy\n ```\n\n### View an expanded version of the CI/CD configuration\nWhen configuring pipelines, you use keywords like 'include' and 'extends' often. 
These keywords help break down one long pipeline configuration file into multiple files, which increases readability and reduces duplication. Unfortunately, those keywords can make a pipeline configuration hard to follow. In some configurations, a pipeline configuration file can be mostly composed of a list of other included configuration files.\n\nTo make the configuration easier to follow, we've added the ability to view a version of your pipeline configuration with all of the 'includes' and 'extends' configurations merged together as a fourth tab in the Pipeline Editor. Now it's much easier to understand more complex pipeline flows and this simplifies the debugging process.\n\nPipeline configuration example:\n\n![pipeline configuration](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image6.png){: .shadow.medium.center}\n\nThe expanded version of the pipeline configuration:\n\n![expanded pipeline configuration](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image7.png){: .shadow.medium.center}\n\n### Lint\n\nThe CI lint helps you validate your pipeline configuration and provides you with additional information about it. That's why we've copied the existing CI linter (which was well hidden in our jobs page) to the Pipeline Editor as a third tab.\n\nThe linter provides you with detailed information about every job you've configured in your pipeline. 
For each job, it provides the [before_script](https://docs.gitlab.com/ee/ci/yaml/#before_script), [after_script](https://docs.gitlab.com/ee/ci/yaml/#after_script), and [script](https://docs.gitlab.com/ee/ci/yaml/#script) fields, tags, environment names, branches it should run, and more…\n\nIf you look at the following example, just by looking at the linter tab you'll know that the `prepare` job:\n* Runs in the `prepare` stage\n* Contains `before_script`, `script`, and `after_scripts` fields \n* Runs only on master \n* Runs upon failure\n* Tag as production\n* Has the environment set to production \n\n![image3](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image3.png){: .shadow.medium.center}\n\nIn this second example, you can see that the build job is a manual job that runs on all branches and is allowed to fail:\n\n![Manual build job](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image5.png){: .shadow.medium.center}\n\n## How the Pipeline Editor came about\n\nEarlier this year, we decided to split continuous integration into two separate teams: [Continuous Integration](/direction/verify/continuous_integration/), which is responsible for improving the experience of running a CI/CD pipeline, and [Pipeline Authoring](/direction/verify/pipeline_composition/), responsible for helping you author your pipeline. We've defined the Pipeline Authoring team goal as, \"Making the authoring experience as easy as possible for both advanced and novice users.\"\n\n![Verify Groups](https://about.gitlab.com/images/handbook/engineering/verify/verify_groups_banner.jpg){: .shadow.center}\n\nAs a team, we realized that a dedicated authoring area is needed to achieve our [ambitious roadmap](https://youtu.be/hInM7JUEH4Y) – this is when the Pipeline Editor idea was formed. \n\n## Try out Pipeline Editor yourself\n\nThat's it! I hope you found this overview useful. 
To get started with GitLab CI, you can [try out our hosted GitLab.com solution](/free-trial/), or you can [download GitLab Self-Managed](/free-trial/) and read its documentation for more in-depth coverage of the functionality. \n\nIf you are using our Pipeline Editor, we would love it if you leave us a note on our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/298928)! If you'd like to learn more about the upcoming features, feel free to read through the [Pipeline Editor second iteration epic](https://gitlab.com/groups/gitlab-org/-/epics/4814), and tag `@dhershkovitch` if you have any questions.\n",[976,977,9,916],{"slug":6789,"featured":6,"template":686},"pipeline-editor-overview","content:en-us:blog:pipeline-editor-overview.yml","Pipeline Editor Overview","en-us/blog/pipeline-editor-overview.yml","en-us/blog/pipeline-editor-overview",{"_path":6795,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6796,"content":6801,"config":6807,"_id":6809,"_type":14,"title":6810,"_source":16,"_file":6811,"_stem":6812,"_extension":19},"/en-us/blog/pipelines-as-code",{"title":6797,"description":6798,"ogTitle":6797,"ogDescription":6798,"noIndex":6,"ogImage":928,"ogUrl":6799,"ogSiteName":670,"ogType":671,"canonicalUrls":6799,"schema":6800},"Pipelines-as-Code: How to improve speed from idea to production","Pipelines-as-Code streamline automatic building, testing, and deploying of applications using prebuilt pipelines and infrastructure components. 
Here's how it works.","https://about.gitlab.com/blog/pipelines-as-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pipelines-as-Code: How to improve speed from idea to production\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robert Williams\"}],\n        \"datePublished\": \"2022-01-18\",\n      }",{"title":6797,"description":6798,"authors":6802,"heroImage":928,"date":6804,"body":6805,"category":791,"tags":6806},[6803],"Robert Williams","2022-01-18","\nToday’s DevOps platform-centric world is moving steadily towards an \"Everything-as-Code\" mentality. Add in cloud native, and it's clearly even more important to standardize how you define your DevOps processes.\n\n## Why ‘as-Code’?\n\nThanks to faster iteration, cloud native computing, and [microservices-based architectures]\n(https://about.gitlab.com/topics/microservices/), as-Code technologies have become the de-facto standard for a lot of different parts of the software development lifecycle. \n\nThe need to release faster requires a single spot for teams to collaborate on any kind of change – code, infrastructure, configuration, networking, or testing. And to implement that change quickly we need to be able to see and review it before it goes into production. \n\nAs-Code solutions are at the core of cloud native technologies such as Kubernetes, where you utilize YAML or JSON formats to configure and manage. Here are the key advantages of 'as-Code':\n\n- auditability\n- scalability\n- efficiency\n- collaboration\n\nThese benefits come into play with every piece of technology that moves into as-Code; we have seen it time and again as DevOps processes mature and we automate each piece of the software development lifecycle. Here are the critical 'as-Code' stages: \n\n### Build-as-Code\n\nOne of the first steps when building a new pipeline is to implement a way to build your application automatically. 
Containerization is one of the most common ways: You define your build steps as a Dockerfile and then you have automated the build of the application.\n\n### Test-as-Code\n\nAs our deployment frequency and team size scales, the need for test cases to be automated scales as well. So we automate, we write unit tests and test scripts to execute unit tests, and then we ensure the changes can be continuously integrated safely, without introducing unplanned bugs.\n\n### Security-as-Code\n\nTo ensure software gets to market quickly, security must be included in your testing process. The testing has to happen either through tools integrated with each individual project, or implemented as code, creating job templates for security scanners that can be ingested by projects as required. These steps enable teams to quickly become compliant with various security frameworks (like PCI-DSS, HIPAA,,or ISO) as they become relevant for the project.\n\n### Deployment-as-Code\n\nDeployments need to be standardized so they are predictable every time. To ensure successful peer review, production and development environment deployments need to be the same, and there's an added bonus of a quality gate between them. Through scripting and implementation of Deployment-as-Code, we end up with the ability to continuously deploy code and continuously deliver value.\n\n## Why Pipelines-as-Code?\n\nPipelines are the center of the CI/CD workflow – they're the automation heart that powers all of the benefits of as-Code technologies. Once you have the Build-as-Code, Test-as-Code, Deployment-as-Code, Infrastructure-as-Code, and Configuration-as-Code, you have all the parts needed to ensure that you can reliably and predictably take your application into production environments. 
But, to move changes in with agility, you need to take all those parts and string them together into a pipeline.\n\nThe technology behind Pipelines-as-Code makes it possible to create centralized repositories for your organization's pipelines. Pipelines-as-Code can be set up to fit all boxes for varied languages and use cases (like [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/)) or with a [number of options](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates) so that developers can pick base pipelines to fit their use case. It's important to have a baseline that conforms to the organization's standards because that always increases the speed to production.\n\nThe entire team can collaborate on changes to each part of the workflow. Version history can be easily maintained in the same version control system as everything else that touches the DevOps lifecycle.\n\nThe benefits of as-Code technology reach a pinnacle with Pipelines-as-Code, so teams gain increases in efficiency, scalability, auditability, and collaboration. 
Pipelines-as-Code are at the center of automated GitOps, DevOps, and SecOps workflows.\n",[1181,1041,9],{"slug":6808,"featured":6,"template":686},"pipelines-as-code","content:en-us:blog:pipelines-as-code.yml","Pipelines As Code","en-us/blog/pipelines-as-code.yml","en-us/blog/pipelines-as-code",{"_path":6814,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6815,"content":6821,"config":6826,"_id":6828,"_type":14,"title":6829,"_source":16,"_file":6830,"_stem":6831,"_extension":19},"/en-us/blog/plugin-instability",{"title":6816,"description":6817,"ogTitle":6816,"ogDescription":6817,"noIndex":6,"ogImage":6818,"ogUrl":6819,"ogSiteName":670,"ogType":671,"canonicalUrls":6819,"schema":6820},"The problem with plugins","For all of the customization, plugins sometimes come at a high price.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673012/Blog/Hero%20Images/plugin-instability.jpg","https://about.gitlab.com/blog/plugin-instability","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The problem with plugins\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-09-27\",\n      }",{"title":6816,"description":6817,"authors":6822,"heroImage":6818,"date":6823,"body":6824,"category":679,"tags":6825},[788],"2019-09-27","\nWe’ve talked a lot over the past year about how [all-in-one is taking over the marketplace model](/blog/github-launch-continuous-integration/), and we highlighted [CloudBees adding SDM](/blog/jenkins-one-year-later/) in our most recent example. Even with all of the consolidation we’ve seen lately, plugins are still a popular [DevOps solution](/topics/devops/). On the surface, there’s a lot to appreciate: Literally thousands of plugins offer seemingly limitless customization without you having to make large investments in other tools. Need something? 
Chances are there’s a plugin for that.\n\nJenkins plugins have served as both a selling point **_and_** a downside – but how can a strength also be a weakness? All that customization comes with a few caveats.\n\n## Plugins and security vulnerabilities\n\nJenkins offers more than 1,600 community-contributed plugins. David Fiser over at the TrendLabs Security Intelligence Blog highlighted some [Jenkins security advisories associated with plain-text-stored credentials](https://blog.trendmicro.com/trendlabs-security-intelligence/hiding-in-plain-text-jenkins-plugin-vulnerabilities/) from July and August 2019. There were six plugins affected, one of which has been deprecated. At the time of article publication (August 30), three of the plugins had not been fixed.\n\nTo properly store credentials, a third-party credential provider, such as the `Credentials` plugin, is recommended. Organizations can also use a [`Secret`](https://javadoc.jenkins.io/index.html?hudson/util/Secret.html) to store credentials. Jenkins was proactive in identifying these potential problems but, in the case of plugins, Jenkins can only recommend best practices and notify users once they’re aware of a potential issue. Because the plugins are operated by third parties, there’s also no guarantee any problems will be fixed.\n\nInstalling Jenkins plugins is limited to either a dedicated Jenkins admin or someone with exclusive access to the Jenkins filesystem, but uploading a potentially malicious plugin to the Jenkins plugin site doesn’t require as much authentication.\n\nThe team at CyberArk wanted to see just how easy it would be for an attacker to infiltrate a plugin. Dubbed [Aladdin’s Lamp](https://www.cyberark.com/threat-research-blog/jenkins-plugins-aladdins-lamp-and-the-sultan-of-threats/), the CyberArk team modified the existing Green Balls plugin that changed the plugin image to an image of Aladdin’s lamp. 
What they inserted discreetly into the code was a capability that gave any unauthenticated remote attacker SYSTEM access to a Jenkins master that installed their plugin with a specially crafted request:\n\n[`http://jenkinsURL:8080/OpenSesame`](http://jenkinsURL:8080/OpenSesame)\n\nTheir experiment was not malicious, of course, but it highlighted just how easy it could be to exploit the plugin ecosystem.\n\n## Plugins and brittle pipelines\n\nIt’s a tall order for users to weigh the pros and cons of more than 1,600 plugins, and many people rely on a plugin’s popularity in order to gauge whether it’s a suitable option. A simple search for a Docker plugin could show almost 26 results, and upon further review, one of the top results has eight plugin dependencies. If a team is using plugins for Docker, Kubernetes, GitLab, Go – those dependencies can really add up, and that’s where teams start seeing brittle pipelines.\n\nTechnology is constantly evolving, and keeping up with all of these dependencies can spell trouble for pipelines. The last thing you want is a broken deployment pipeline because [the pipeline itself is broken vs. the actual software artifact or build that’s being tested](https://harness.io/2018/09/4-reasons-your-jenkins-pipelines-are-brittle/).\n\nA vast majority of Jenkins plugins were created by third-party developers, meaning they can vary in quality and [some plugins lose support without notice](https://thenewstack.io/many-problems-jenkins-continuous-delivery/). Abandoned plugins are out there because their creators have opted to work on something else. Teams have to be diligent with maintaining these plugins with every new Jenkins version, but as any Jenkins admin can tell you, [this process has not always gone over well](https://jenkins.io/blog/shifting-gears/).\n\n## Plugins and maintenance\n\nWe touched on this briefly but admins are mostly in agreement that Jenkins maintenance is, to put it simply, not a great time. 
There’s a reason why developers often talk about their love/hate relationship with Jenkins – **_yay!_**, there’s a plugin for everything I need, **_oh no!_** I’m a Jenkins plugin maintainer now.\n\nUpgrading one plugin means you’ll likely have to update many others, and many Jenkins admins do this directly on their production Jenkins master. In one example, [Blue Ocean requires dozens of dependencies, many of which you may have no use for](https://cb-technologists.github.io/posts/jenkins-plugins-good-bad-ugly/), such as the Bitbucket Pipeline for Blue Ocean and the GitHub Pipeline for Blue Ocean plugins, even if you don’t use either Bitbucket or GitHub for source control.\n\n## Plugins: Pros and cons\n\nThere are pros and cons to anything and plugins are no exception. There is a lot to love about plugins:\n\n*   Flexibility\n*   Customization\n*   Convenience\n\nAnd there are things to be wary of:\n\n*   Maintenance\n*   Dependencies\n*   Lack of support\n*   Security vulnerabilities\n\nWith Jenkins’s modular architecture there’s a building block for everything you need. However, an ecosystem built entirely on plugins is going to require some discipline, and that means dedicating resources into maintaining that plugin environment.\n\nPlugins can be a great asset for a DevOps team. As CloudBees pointed out, [even GitLab uses plugins](https://docs.gitlab.com/ee/administration/file_hooks.html). We just don’t think you should have to use plugins for really basic tasks. In the end, it’s important for organizations to weigh the pros and cons of different platforms for themselves. 
You can check out our ebook, “The benefits of single application CI/CD,” and see how we stack up against other CI tools.\n\nCover image by [Fernando Lavin](https://unsplash.com/@filmlav?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/@filmlav?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[109,9],{"slug":6827,"featured":6,"template":686},"plugin-instability","content:en-us:blog:plugin-instability.yml","Plugin Instability","en-us/blog/plugin-instability.yml","en-us/blog/plugin-instability",{"_path":6833,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6834,"content":6839,"config":6844,"_id":6846,"_type":14,"title":6847,"_source":16,"_file":6848,"_stem":6849,"_extension":19},"/en-us/blog/positive-outcomes-ci-cd",{"title":6835,"description":6836,"ogTitle":6835,"ogDescription":6836,"noIndex":6,"ogImage":1624,"ogUrl":6837,"ogSiteName":670,"ogType":671,"canonicalUrls":6837,"schema":6838},"4 Benefits of CI/CD","Learn how to implement and measure a successful CI/CD pipeline strategy and help your DevOps team deliver higher quality software, faster!","https://about.gitlab.com/blog/positive-outcomes-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 Benefits of CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-06-27\",\n      }",{"title":6835,"description":6836,"authors":6840,"heroImage":1624,"date":6841,"body":6842,"category":679,"tags":6843},[788],"2019-06-27","\n[CI/CD](/topics/ci-cd/) helps DevOps teams ship higher quality software, faster, for improved software deployment. But is all [CI/CD](/topics/ci-cd/) created equal? 
What do the benefits of continuous integration, continuous delivery, and continuous deployment look like and how do you know you're on the right track?\n\nIn this four-part series, we talk about modernizing your CI/CD: Challenges, impact, outcomes, and solutions. In [part one](/blog/modernize-your-ci-cd/), we focused on common CI/CD challenges. In [part two](/blog/business-impact-ci-cd/), we talked about the revenue impacts. Today, we’ll talk about what CI/CD can deliver and how to measure its success.\n\nIf these problems hit a little too close to home, stay tuned for part four where we dive deeper into finding the right CI/CD solution for you.\n\n## What are some of the benefits of a good CI/CD strategy?\n\n### 1. Increased speed of innovation and ability to compete in the marketplace\n\nTwo identical companies: One implements [CI/CD technology](/topics/ci-cd/) and the other doesn’t. Who do you think deploys applications faster? While this seems like a silly comparison, because _of course_ the company with more automation deploys faster, there are organizations out there still convinced they don’t need CI/CD because they’re not looking at their competition. Organizations that understand the importance of CI/CD are setting the pace of innovation for everyone else.\n\n### 2. Code in production is making money instead of sitting in a queue waiting to be deployed\n\nOrganizations that have implemented CI/CD are making revenue, satisfying customers, and getting user feedback on the product features they deploy, not waiting for a manual check to see if the code is up to par. They already know the code is good because they have tests that are automated, and continuous delivery means that code is deployed automatically if it meets certain standards. They’ve removed human error and delays from the process so they can ship more code to production.\n\n### 3. 
Great ability to attract and retain talent\n\nEngineers that can focus on what they’re best at will be happier and more productive, and that has far-reaching impact. Turnover can be expensive and disruptive. A good CI/CD strategy means engineers can work on important projects and not worry about time-consuming manual tasks. They can also work confidently knowing that errors are caught automatically, not right before deployment. This kind of cooperative engineering culture inevitably attracts talent.\n\n### 4. Higher quality code and operations due to specialization\n\nThe development team can focus on dev. The operations team can focus on ops. Bad code rarely makes it to production because continuous testing is automated. Developers can focus on the code rather than the production environment, and operations doesn’t have to feel like a gatekeeper or a barrier. Both teams can work to their strengths, and automated handoffs make for seamless processes for the entire team. [This kind of cooperation makes DevOps possible](/topics/devops/build-a-devops-team/) and improves code quality.\n\n## What capabilities are required to make this happen?\n\n### 1. Robust CI/CD\n\nWhen we use the term “robust,” it’s all about avoiding half-baked or partial solutions. There are several CI/CD solutions out there but there are varying degrees of effectiveness. Continuous integration and continuous delivery go hand in hand, so having a solution that offers both is ideal. The tool you use should offer the automation you need, not just some. If your CI/CD tool is prone to failure or “brittle,” it can be just one more thing to manage. This was precisely why [the team at Ticketmaster replaced Jenkins CI and moved to weekly releases](/blog/continuous-integration-ticketmaster/), decreasing their pipeline execution time from two hours to only _eight minutes_ to build, test, and publish artifacts.\n\n### 2. 
Containers and Kubernetes\n\nContainers have made a huge impact on the way companies build and deploy code. While it was once difficult to develop applications with a [microservices architecture](/blog/strategies-microservices-architecture/), over the past five years it has become considerably easier with container orchestration tools like Kubernetes, comprehensive CI/CD tools that automate testing and deployments, and APIs that update automatically. Breaking up services so they can run independently reduces dependencies and creates better workflows.\n\n### 3. Functionality for the entire DevOps lifecycle\n\nVisibility is a huge asset when improving DevOps workflows. For some teams, they can have several tools handling different facets of the software development lifecycle (SDLC), which creates integration issues, maintenance issues, visibility issues, and is [just plain expensive](/calculator/roi/) from a cost standpoint. A complex toolchain can also weaken security. In a [Forrester survey of IT professionals](/resources/downloads/201906-gitlab-forrester-toolchain.pdf), 45% said that they had difficulty ensuring security across the toolchain.\n\n## How would you measure the success of a CI/CD strategy?\n\n### 1. Cycle time\n\nCycle time is the speed at which a [DevOps team](/topics/devops/) can deliver a functional application, from the moment work begins to when it is providing value to an end user.\n\n### 2. Time to value\n\nOnce code is written, how long before it’s released? This delay from when code is written to running in production is the time to value, and is a bottleneck for many organizations. Continuous delivery as well as [examining trends in the QA process](/blog/trends-in-test-automation/) can help to overcome this barrier to quick deployments and frequent releases.\n\n### 3. 
Uptime, error rate, infrastructure costs\n\nUptime is one of the biggest priorities for the ops team, and with a good CI/CD strategy that automates different processes, they should be able to focus more on that goal. Likewise, error rates and infrastructure costs can be easily measured once CI/CD is put in place. Operations goals are a key indicator of process success.\n\n### 4. Team retention rate\n\nHappy developers stick around, so looking at retention rates is a reliable way to gauge how well new development processes and applications are working for the team. It might be tough for developers to speak up if they don’t like how things are going, but looking at retention rates can be one step in identifying potential problems.\n\nThe benefits of a good CI/CD strategy are felt throughout an organization: From HR to operations, teams work better and achieve goals. In such a competitive development landscape, having the right CI/CD in place gives any company an edge.\n\nSo what makes “good” CI/CD? 
We invite you to compare GitLab CI/CD to other CI tools and see why we were rated #1 in the Forrester CI Wave™.\n",[9,109,683],{"slug":6845,"featured":6,"template":686},"positive-outcomes-ci-cd","content:en-us:blog:positive-outcomes-ci-cd.yml","Positive Outcomes Ci Cd","en-us/blog/positive-outcomes-ci-cd.yml","en-us/blog/positive-outcomes-ci-cd",{"_path":6851,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6852,"content":6858,"config":6864,"_id":6866,"_type":14,"title":6867,"_source":16,"_file":6868,"_stem":6869,"_extension":19},"/en-us/blog/postman-integration-with-gitlab-makes-your-api-workflows-easier",{"title":6853,"description":6854,"ogTitle":6853,"ogDescription":6854,"noIndex":6,"ogImage":6855,"ogUrl":6856,"ogSiteName":670,"ogType":671,"canonicalUrls":6856,"schema":6857},"Postman integration with GitLab makes API workflows easier","Learn how to use the git integration to link APIs in Postman to GitLab cloud repos.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671434/Blog/Hero%20Images/introducing-continuous-workflows.jpg","https://about.gitlab.com/blog/postman-integration-with-gitlab-makes-your-api-workflows-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Postman integration with GitLab makes API workflows easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andy Rogers\"}],\n        \"datePublished\": \"2022-08-24\",\n      }",{"title":6853,"description":6854,"authors":6859,"heroImage":6855,"date":6861,"body":6862,"category":791,"tags":6863},[6860],"Andy Rogers","2022-08-24","\n\nAPIs are more than just an interface. From a development lifecycle perspective, an API includes source code, definition files, tests, performance measurements, documentation, security audits, deployments, and feedback from API consumers. All of these elements are required for a successful API implementation. 
So, in partnership with GitLab, Postman created a git integration that allows users to link APIs in Postman to their GitLab cloud repos (on-prem versions of GitLab are only supported on [Postman Enterprise](https://www.postman.com/pricing/)).\n\nThe [Postman API Platform](https://blog.postman.com/new-postman-api-platform-redefining-api-management-for-api-first-world/) is designed to help teams collaborate seamlessly by providing tools for the entire API lifecycle. We understand that a fundamental part of the API lifecycle includes [developer workflows](https://blog.postman.com/the-reimagined-api-first-workflow-for-developers/) centered around code and source control.\n\n![illustration](https://about.gitlab.com/images/blogimages/postman1.png){: .shadow}\n\n## 4 key benefits for better collaboration\n\nThe launch of this integration earlier in the year provides four key benefits that empower teams to work faster and better together:\n\n**1.** It introduces the concept of version control into Postman. Users are now able to manage and sync branches, releases, versions, and tags for their APIs in GitLab and Postman. \n\n\n![screenshot of drop-down menu](https://about.gitlab.com/images/blogimages/postman2.png){: .shadow}\n\n\n**2.** Elements created in Postman can be pushed to a user’s GitLab repository, where the schema and collections can coexist alongside the source code. Likewise, branching workflows that your team might already be using can now be followed in Postman; external changes to code and API definitions are reviewable and can be merged back to Postman.\n\n\n![screenshot of branch info](https://about.gitlab.com/images/blogimages/postman3.png){: .shadow}\n\n**3.** This integration enables developers to think about API elements as the API itself, instead of treating code, API definitions, documentation, collections, tests, monitors, etc. as independent entities. All of these constitute the API. 
Moreover, this allows a higher-level view of the entire API, rather than just the source code — a critical requirement for any organization who wants to build a structured and robust API program.\n\n\n![screenshot of API info](https://about.gitlab.com/images/blogimages/postman4.png){: .shadow}\n\n\n**4.** The Postman-GitLab integration greatly minimizes the likelihood that downstream teams and API consumers will interact with outdated (or even deprecated) APIs or API elements. Users don’t have to spend time deciphering what API, collection, or documentation is current, since they can see what version they are working with all the way back to the code. In Postman, users also have direct access to real-time collaborative tools such as commenting and forking/merging to maintain synchronization between downstream API consumption and the source of truth.\n\n![illustration](https://about.gitlab.com/images/blogimages/postman5.png){: .shadow}\n\n## An integration for the API-first world\n\nOur partnership with GitLab supports our commitment to building Postman as the platform for the [API-first world](https://api-first-world.com/). With integrations like this, [API-first companies](https://blog.postman.com/what-is-an-api-first-company/) are now more productive, can deliver higher-quality products, and are able to build stronger ecosystems of developers, partners, and consumers. 
\n\nTo get started with the GitLab integration, check out [our guide](https://blog.postman.com/the-reimagined-api-first-workflow-for-developers/) and our how-to video for GitLab integration config:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/BL8DFOPncMc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n_Andy Rogers is product manager at Postman._\n\n\n",[9,683,231],{"slug":6865,"featured":6,"template":686},"postman-integration-with-gitlab-makes-your-api-workflows-easier","content:en-us:blog:postman-integration-with-gitlab-makes-your-api-workflows-easier.yml","Postman Integration With Gitlab Makes Your Api Workflows Easier","en-us/blog/postman-integration-with-gitlab-makes-your-api-workflows-easier.yml","en-us/blog/postman-integration-with-gitlab-makes-your-api-workflows-easier",{"_path":6871,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6872,"content":6877,"config":6884,"_id":6886,"_type":14,"title":6887,"_source":16,"_file":6888,"_stem":6889,"_extension":19},"/en-us/blog/production-grade-infra-devsecops-with-five-minute-production",{"title":6873,"description":6874,"ogTitle":6873,"ogDescription":6874,"noIndex":6,"ogImage":5897,"ogUrl":6875,"ogSiteName":670,"ogType":671,"canonicalUrls":6875,"schema":6876},"GitOps & DevSecOps for production infrastructure in minutes","Unlock production-grade infrastructure and development workflows in under five minutes with Five Minute Production App: a blend of solutions offered by AWS, Hashicorp Terraform, and GitLab.","https://about.gitlab.com/blog/production-grade-infra-devsecops-with-five-minute-production","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Production-grade infrastructure, GitOps convergence, and DevSecOps in under 5 minutes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sri 
Rangan\"}],\n        \"datePublished\": \"2021-02-24\",\n      }",{"title":6878,"description":6874,"authors":6879,"heroImage":5897,"date":6881,"body":6882,"category":791,"tags":6883},"Production-grade infrastructure, GitOps convergence, and DevSecOps in under 5 minutes",[6880],"Sri Rangan","2021-02-24","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-03-10.\n{: .note .alert-info .text-center}\n\nThis is a story about achieving production-grade infrastructure in under five minutes.\\\\\nThis is a story about achieving production-grade DevSecOps in under five minutes.\\\\\nThis is a story about achieving total convergence of GitOps in under five minutes.\n\nMy name is Sri and over the last three months and I worked closely with GitLab co-founder [DZ](/company/team/#dzaporozhets) in building \"Five Minute Production App.\"\n\nThe app blends solutions offered by AWS, Hashicorp Terraform, and GitLab, and offers production-grade infrastructure and development workflows in under five minutes.\n\n![Five Minute Production App Diagram](https://about.gitlab.com/images/blogimages/five-min-prod-01-complete-flow.png){: .shadow.medium.center}\n\nApart from the efficiencies gained from using Five Minute Production App, you benefit by achieving stateful, production-ready infrastructure on the AWS hypercloud.\n\nWe started with AWS first, as it is the hypercoud leader today. Support for Azure and Google Cloud is on the roadmap.\n\nOur vision and design decisions are explained in the [README](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#quickly).\n\n## Quickstart \n\nWe start with your GitLab project which has the source code of your web application. 
Regardless of which language or framework you use, your web application is packaged as a container image and stored within your GitLab project's Container Registry.\nThis is the Build stage.\n\nThis is followed by the Provision stage where Terraform scripts connect to AWS and create a secure environment for your web application.\nThe environments provisioned relate to your Git branching workflow.\nLong-lived Git branches create long-lived environments, and short-lived Git branches correspond to short-lived environments.\n\nResources provisioned include an Ubuntu VM, scalable PostgreSQL database, a Redis cluster, and S3 object storage.\nWe consider these elements as the building blocks for majority of web applications, and many of these fall under AWS free tier.\n\nThe infra state and credentials are stored within your GitLab project's managed Terraform state.\n\nFinally, we reach the Deploy stage which:\n1. Retrieves the deployable image from the GitLab Container Registry\n1. Retrieves the infrastructure credentials from the Gitlab Managed Terraform State, and\n1. Proceeds to deploy your web application\n\nEverything is achieved by including these two lines in your `.gitlab-ci.yml` file.\n\n```yaml\ninclude:\n  remote: https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/raw/stable/deploy.yml\n```\n\nLet's look at the complete process in more detail.\n\n![Three stages of Five Minute Production App](https://about.gitlab.com/images/blogimages/five-min-prod-02-pipeline.png){: .shadow.medium.center}\nThe three stages of Five Minute Production App\n{: .note.text-center}\n\n## Build and package\n\nThe Build stage is where it all begins. Five Minute Production App reuses the [Auto Build stage](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-build) from the GitLab Auto DevOps pipeline.\n\nAuto Build builds and packages web applications that are:\n1. Containerized with a Dockerfile, or\n2. Compatible with the Cloud Native buildpack, or\n3. 
Compatible with the Heroku buildpack\n\nThus, web applications across multitudes of technologies are supported, including web frameworks such as Rails, Django, Express, Next.js, Spring, etc.\nand programming languages including Python, Java, Node.js, Ruby, Clojure, etc.\n\nOnce the Auto Build job has finished execution, the newly created container image is stored as an artifact in your GitLab project's Container Registry.\n\n## Provision the infrastructure\n\nThe next step, Provision, prepares infrastructure resources in AWS.\nThe first requirement here is the presence of AWS credentials stored as CI/CD variables at the project or group level.\nOnce valid AWS credentials are found, a Terraform script is executed to generate resources in AWS.\n\nThese resources include:\n1. EC2 VM based on Ubuntu 20.04 LTS\n2. PostgreSQL database managed by AWS RDS\n3. Redis cluster managed by AWS ElastiCache\n4. S3 bucket for file storage\n5. Email Service credentials managed by AWS SES\n\nThe most critical resource is the PostgreSQL service which has daily backups enabled.\nPostgreSQL data is snapshotted if the infrastructure resource is \"destroyed\" through a manual user action via the Five Minute Production App pipeline.\n\nThe EC2 VM is the only service accessible publicly. 
Ports 22, 80 and 443 are exposed.\nEvery other resource described above is part of a secure, private network, hidden from the public web, accessible ony via the EC2 instance and your web applicable deployed there.\n\nThe stateful services and environments are tied to your Git branches.\\\\\nThis means every Git branch creates a new environment with these resource sets.\\\\\nWe don't have a preference on your Git branching and environments lifecycle.\\\\\nUse long-lived or short-lived branches as you see fit, just keep in mind that long-lived branches leads to long-lived environments and short-lived branches leads to short-lived environments.\n\n![Infrastructure resources provisioned on AWS](https://about.gitlab.com/images/blogimages/five-min-prod-03-infra-resources.png){: .shadow.medium.center}\nInfrastructure resources provisioned on AWS\n{: .note.text-center}\n\n## Deploy your web application\n\nFinally comes the Deploy stage.\n\nThis is where the deploy script retrieves your web application package (container image) from the GitLab Container Registry, then retrieves the EC2 instance\ncredentials from the GitLab Managed Terraform State, and proceeds to deploy the relevant version of your web application in its environment.\n\nModern web applications might require additional commands being executed after each deployment or after the initial deployment,\nand these commands can be defined as variables in your `.gitlab-ci.yml` file.\n\nFinally, with the help of Certbot from Letsencrypt, SSL certificates are generated and configured for your web application.\nIf you have defined the `CERT_DOMAIN` CI/CD variable the SSL certificate will be generated for your custom domain name.\nOtherwise the generated SSL certificate uses a dynamic URL that Five Minute Production App prepares for you.\n\n## Conclusion\n\nThere we have it. A simple yet production-ready setup for your web application. 
If you are looking for an AWS-based setup, this is ready for usage.\n\nIf you are looking for something similar but not quite Five Minute Production App, this serves as an example of how to converge infrastructure-as-code with software development and provide seamless continuous deployment workflows.\n\nIn my personal experience, this is one of the most complete examples of GitOps:\n\n1. Your application source code lives in your GitLab project\n2. Your infrastructure defined as code lives in your GitLab project\n3. Your CI/CD pipeline lives in your GitLab project\n4. Your infrastructure state lives in your GitLab project\n5. Your infrastructure secrets and credentials live in your GitLab project\n6. Your environments configuration lives in your GitLab project\n\nThis complete GitOps convergence is not specifically configured for one project. It can be included as a template from multiple projects.\nThere is no reason why the GitLab project in your organization cannot be the single source of truth for everything.\n\n### Links\n\n- [Five Minute Production App](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/README.md)\n- [Reference Examples](https://gitlab.com/gitlab-org/5-minute-production-app/examples)\n\n### About the author\n\n[Sri Rangan](mailto:sri@gitlab.com), an Enterprise Solutions Architect with GitLab, is a core-contributor and maintainer\nof [Five Minute Production App](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/README.md).",[976,977,9,1041,534,1339],{"slug":6885,"featured":6,"template":686},"production-grade-infra-devsecops-with-five-minute-production","content:en-us:blog:production-grade-infra-devsecops-with-five-minute-production.yml","Production Grade Infra Devsecops With Five Minute 
Production","en-us/blog/production-grade-infra-devsecops-with-five-minute-production.yml","en-us/blog/production-grade-infra-devsecops-with-five-minute-production",{"_path":6891,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6892,"content":6898,"config":6904,"_id":6906,"_type":14,"title":6907,"_source":16,"_file":6908,"_stem":6909,"_extension":19},"/en-us/blog/protecting-manual-jobs",{"title":6893,"description":6894,"ogTitle":6893,"ogDescription":6894,"noIndex":6,"ogImage":6895,"ogUrl":6896,"ogSiteName":670,"ogType":671,"canonicalUrls":6896,"schema":6897},"How to limit access to manual pipeline gates and deployments using GitLab","Let's look at how to use protected environments to set up access controls for production deployments and manual gates.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681105/Blog/Hero%20Images/protect_manual_jobs.jpg","https://about.gitlab.com/blog/protecting-manual-jobs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to limit access to manual pipeline gates and deployments using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Thao Yeager\"}],\n        \"datePublished\": \"2020-02-20\",\n      }",{"title":6893,"description":6894,"authors":6899,"heroImage":6895,"date":6901,"body":6902,"category":791,"tags":6903},[6900],"Thao Yeager","2020-02-20","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-02-21.\n{: .alert .alert-info .note}\n\nIn our world of automation, why would anyone want to do something manually? Manual has become almost synonymous with inefficient. But, when it comes to CI/CD pipelines, a properly configured **manual** job can be a powerful way to control deployments and satisfy compliance requirements. 
Let’s take a look at how manual jobs can be defined to serve two important use cases: Controlling who can deploy, and setting up manual gates.\n\n## Limit access to deploy to an environment\n\nDeploying to production is a mission-critical occurence that should be protected. Projects with a Kubernetes cluster could benefit from moving to a continuous deployment (CD) model in which a [branch or merge request, once merged, is auto-deployed to production](https://docs.gitlab.com/ee/topics/autodevops/index.html#auto-deploy), and the absence of human intervention avoids mishaps. But for projects not yet configured for CD, let's consider this use case: Imagine a pipeline with a manual job to deploy to prod, which can be triggered by any user with access to push code. The risk of a unplanned, unintended production deployment is very real.\n\nFortunately, it’s possible to use [protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments/) to prevent just anyone from deploying to production. When [configuring a protected environment](https://docs.gitlab.com/ee/ci/environments/protected_environments.html#protecting-environments), you can define the roles, groups, or users to whom deploy access is granted. The protected environment can then be defined in a manual job to deploy which limits who can run it. The configuration could look something like this:\n\n```yaml\ndeploy_prod:\n  stage: deploy\n  script:\n    - echo \"Deploy to production server\"\n  environment:\n    name: production\n    url: https://example.com\n  when: manual\n  only:\n    - master\n```\n\nIn the example above, the keyword `environment` is used to reference a protected environment (as [configured in project settings](https://docs.gitlab.com/ee/ci/environments/protected_environments.html#protecting-environment)) with a list of users who can run the job, in this case deploy to the named environment. 
Users without access see a disabled **play** button and are unable to execute the job.\n\n## Add an approval step\n\nCompliance rules may specify that approval is required for certain activities in a workflow, even if they aren't technically a deployment step themselves. In this use case, an approval step can also be added in the pipeline that prompts an authorized user to take action to continue. This can be achieved by structuring your pipeline with an \"approve\" stage containing a special manual job – for example, the YAML to insert an approval stage before deployment could look like this:\n\n```yaml\nstages:\n  - build\n  - approve\n  - deploy\n\nbuild:\n  stage: build\n  script:\n    - echo Hello!\n\napprove:\n  stage: approve\n  script:\n    - echo Hello!\n  environment:\n    name: production\n    url: https://example.com\n  when: manual\n  allow_failure: false\n  only:\n    - master\n\ndeploy:\n  stage: deploy\n  script:\n    - echo Hello!\n  environment:\n    name: production\n    url: https://example.com\n  only:\n    - master\n```\n\nIn the YAML above, `allow_failure: false` [defines the manual job as \"blocking\"](https://docs.gitlab.com/ee/ci/yaml/#whenmanual), which will cause the pipeline to pause until an authorized user gives \"approval\" by clicking on the **play** button to resume. Only the users part of that environment list will be able to perform this action. 
In this scenario, the UI view of the pipeline in the example CI configuration above would look like this:\n\n![Pipeline view of approval stage manual job](https://about.gitlab.com/images/blogimages/manual_job_approve_stage_ui.png){: .shadow}\n\n## Summary\n\nAs illustrated in the YAML examples and image above, manual jobs defined with protected environments and blocking attributes are effective tools for handling compliance needs as well as for ensuring there are proper controls over production deployments.\n\nTell us how using protected environments with manual jobs has secured your deployments or whether blocking manual jobs helps you meet compliance and auditing. [Create an issue in the GitLab project issue tracker](https://gitlab.com/gitlab-org/gitlab/issues/new) to share your feedback with us.\n\nCover image by [Diane Walton](https://unsplash.com/photos/BNnzmBmnPg4) on [Unsplash](https://unsplash.com)\n{: .note}\n",[109,1731,683,916,9],{"slug":6905,"featured":6,"template":686},"protecting-manual-jobs","content:en-us:blog:protecting-manual-jobs.yml","Protecting Manual Jobs","en-us/blog/protecting-manual-jobs.yml","en-us/blog/protecting-manual-jobs",{"_path":6911,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6912,"content":6918,"config":6924,"_id":6926,"_type":14,"title":6927,"_source":16,"_file":6928,"_stem":6929,"_extension":19},"/en-us/blog/qpage-on-the-devops-platform",{"title":6913,"description":6914,"ogTitle":6913,"ogDescription":6914,"noIndex":6,"ogImage":6915,"ogUrl":6916,"ogSiteName":670,"ogType":671,"canonicalUrls":6916,"schema":6917},"QPage improves deployment & efficiency using GitLab platform","QPage went from a homegrown CI/CD solution to the GitLab DevOps Platform and found more benefits than expected.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664472/Blog/Hero%20Images/gitlabflatlogomap.png","https://about.gitlab.com/blog/qpage-on-the-devops-platform","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How QPage achieved automatic deployment and efficiency using the GitLab DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-09-15\",\n      }",{"title":6919,"description":6914,"authors":6920,"heroImage":6915,"date":6921,"body":6922,"category":769,"tags":6923},"How QPage achieved automatic deployment and efficiency using the GitLab DevOps Platform",[745],"2021-09-15","Deployment automation is essential for any company involved in software development to stay competitive. [QPage](https://www.qpage.one/), a company that provides an end-to-end sourcing and recruitment solution for SMEs, realized it quickly and migrated to GitLab’s DevOps Platform to accelerate their deployment process.\n\nWe spoke with Pouya Lotfi, the co-founder of QPage, to see how they use GitLab at QPage and how it has helped the company.\n\n## Why GitLab?\n\nQPage was initially using a local bespoke CI/CD for about the first two months, but they soon realized they needed a more professional DevOps Platform system. Because Pouya and the team at QPage had already used GitLab at a previous employer, they knew it would be the right fit. So, they didn’t consider other options and opted for GitLab straight away.\n\n**Everything you need to know about [a DevOps platform](/solutions/devops-platform/)**\n\n \"We started from the local CI/CD, but soon we realized that would be something we can actually do with GitLab,” said Pouya Lotfi, co-founder QPage. 
“I had the experience with GitLab back in the other companies I was part of, so we soon actually migrated to GitLab, and we brought everything we could actually have in GitLab’s DevOps Platform to accelerate our deployment and the processes.”\n\nQPage chose GitLab’s paid subscription plan.\n\n## How GitLab’s DevOps Platform works\n\nQPage is using several CI/CD integrations that GitLab offers.\n\n\"We are using it end-to-end, but we did use the benefit of integrating it with other platforms as well,\" Pouya said.\n\nThey are using the GitLab-Kubernetes integration for CI/CD funnels, which allows building, testing, and deploying to cluster, as well as using Auto DevOps to automate the CI/CD process.\n\nAnother key integration for QPage is the JIRA integration - they get notifications and assign a ticket to one of the developers/engineers. However, a part of this process is still done manually as they are not yet using issues, boards, and milestones within GitLab. But, they are considering using GitLab altogether to automate the whole process.\n\n**Get the [most out of your DevOps platform](/topics/devops/seven-tips-to-get-the-most-out-of-your-devops-platform/)**\n\nQPage is also taking advantage of the Docker-GitLab integration. They use containers and images, push them through the GitLab CI and then finally deploy.\n\nThey start with the staging environment, then move to testing and QA, and finally, they push it to the production; their deployment and release part is divided into staging and production. For deployment, QPage is using cloud providers AWS and Digital Ocean.\n\n## The dev team and GitLab\n\nThe developers at QPage find GitLab an easy solution to work with because they already knew how it worked; one of QPage’s basic criteria to hire a developer or an engineer is to have experience with using GitLab or GitHub CI/CD.\n\nAdditionally, they find GiLab’s documentation very helpful. 
When they come across any problem with using GitLab, they quickly reach for the documentation to solve their problems. This eliminates the bottleneck of depending on one person on the team, who is an expert, to solve a problem.\n\n## Key DevOps Platform benefits\n\nOne of the major benefits QPage has seen from using GitLab is achieving automatic deployment. GitLab has made their CI/CD process more efficient as they have integrated it with tools like Kubernetes, Docker, and JIRA.\n\nThey believe the management within GitLab is also a huge plus where they can now test the codes and push them. Additionally, they like the visibility of work and collaboration among the developers. Their team can now know the status of the deployment in terms of whether it was successful or it failed and where it was deployed, such as the staging environment or the production.\n\n**How [DevOps gets easier](https://learn.gitlab.com/smb-devops-1/simplify-devops) with a DevOps platform**\n\nAnother big benefit of migrating to GitLab is the operational efficiency. Their deployment time has now reduced by 80% - with the local CI/CD, it took around 6-8 hours, but with GitLab, it’s between 15-20 minutes.\n\n \"In the beginning, when we had done it through the local server CI/CD, it would take around 6-8 or 10 hours, and that was a real hassle for us,\" Pouya said. 
“With our GitLab migration, and we push something to production, it takes like 15 to 20 to 30 minutes.”\n\nAlthough QPage has one main product, they have around 29 sub-products, like API algorithms, and they've seen great optimization in deployment with all of their products after using GitLab.\n\nLast but not least, QPage believes using GitLab is also cost-effective for them.",[9,793,976],{"slug":6925,"featured":6,"template":686},"qpage-on-the-devops-platform","content:en-us:blog:qpage-on-the-devops-platform.yml","Qpage On The Devops Platform","en-us/blog/qpage-on-the-devops-platform.yml","en-us/blog/qpage-on-the-devops-platform",{"_path":6931,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6932,"content":6938,"config":6943,"_id":6945,"_type":14,"title":6946,"_source":16,"_file":6947,"_stem":6948,"_extension":19},"/en-us/blog/quick-start-guide-for-gitlab-workspaces",{"title":6933,"description":6934,"ogTitle":6933,"ogDescription":6934,"noIndex":6,"ogImage":6935,"ogUrl":6936,"ogSiteName":670,"ogType":671,"canonicalUrls":6936,"schema":6937},"Quickstart guide for GitLab Remote Development workspaces","Learn how to create a workspace from your GitLab account and work directly from the remote development environment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664219/Blog/Hero%20Images/2023-06-22-quickstart-workspaces-cover-image2.png","https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quickstart guide for GitLab Remote Development workspaces\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-06-26\",\n      }",{"title":6933,"description":6934,"authors":6939,"heroImage":6935,"date":6940,"body":6941,"category":791,"tags":6942},[2120],"2023-06-26","\nGitLab 16.0 introduced [Remote Development workspaces 
(beta)](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/#remote-development-workspaces-available-in-beta-for-public-projects), an exciting addition to the GitLab platform that empowers teams to build and deliver software more efficiently.\n\nThis guide provides step-by-step instructions on how to create a workspace directly from your GitLab account and work directly from the remote development environment. You will work in the Web IDE, a Visual Studio Code browser version, seamlessly integrated into the workspace. \n\nFrom this quick start, you will learn how to create a workspace, use the Web IDE Terminal to install dependencies or start your server, and view your running application. \n\nTo learn more about Remote Development in GitLab, we recommend reading this informative blog post, \"[A first look at workspaces](https://about.gitlab.com/blog/introducing-workspaces-beta/),\" and the [workspaces docs](https://docs.gitlab.com/ee/user/workspace/).\n\nHere are the steps covered in this tutorial:\n\n- [Prerequisites](#prerequisites)\n- [Locate DevFile at the root of repository](#locate-devfile-at-the-root-of-repository)\n- [Create your workspace](#create-your-workspace)\n- [Install dependencies and previewing your application in the workspace](#install-dependencies-and-previewing-your-application-in-the-workspace)\n- [Make changes to the application and previewing the updated version](#make-changes-to-the-application-and-previewing-the-updated-version)\n- [Commit the change](#commit-the-change)\n- [Explore the demo](#explore-the-demo)\n- [Try out workspaces](#try-out-workspaces)\n\n## Prerequisites \nPrior to enabling developers to create workspaces, there are a few prerequisites such as bring your own Kubernetes cluster, and install and configure the GitLab agent for Kubernetes on it. Additionally, certain configuration steps must be completed on the cluster. 
You can find detailed instructions for all these steps in [our workspaces prequisites documentation](https://docs.gitlab.com/ee/user/workspace/configuration.html#prerequisites). Once the prerequisites are properly configured, developers who hold Developer role or above within the root group will gain the ability to create workspaces.\n\n## Locate DevFile at the root of repository\nA [devfile](https://devfile.io/docs/2.2.0/devfile-ecosystem) is a declarative configuration file, in YAML syntax, used to define and describe the development environment for a software project. It provides a standardized way to specify the necessary tools, languages, runtimes, and other components required for developing an application.\n\nTo initiate a workspace, it is necessary to have a devfile located at the root of the repository. In this blog post, we will utilize a project that contains a devfile, accessible [here](https://gitlab.com/gitlab-da/use-cases/remote-development/example-nodejs-express-app/-/raw/main/.devfile.yaml). \n\n```yaml\nschemaVersion: 2.2.0\ncomponents:\n  - name: tooling-container\n    attributes:\n      gl/inject-editor: true\n    container:\n      # NOTE: THIS IMAGE EXISTS ONLY FOR DEMO PURPOSES AND WILL NOT BE MAINTAINED\n      image: registry.gitlab.com/gitlab-org/remote-development/gitlab-remote-development-docs/debian-bullseye-ruby-3.2-node-18.12:rubygems-3.4-git-2.33-lfs-2.9-yarn-1.22-graphicsmagick-1.3.36-gitlab-workspaces\n      memoryRequest: 1024M\n      memoryLimit: 2048M\n      cpuRequest: 500m\n      cpuLimit: 1000m\n      endpoints:\n      - name: http-3000\n        targetPort: 3000\n```\nFor more information, see the [GitLab documentation](https://docs.gitlab.com/ee/user/workspace/#devfile) and [devfile documentation](https://devfile.io/docs/2.2.0/devfile-schema).\n\n## Create your workspace \n1. 
Make sure you have a [Developer role or above](https://docs.gitlab.com/ee/user/permissions.html) in the root group, and the above prerequisites configured properly.\n2. Fork [this project](https://gitlab.com/gitlab-da/use-cases/remote-development/example-nodejs-express-app) to the GitLab group for which you have a Developer role or above. \n3. Switch contexts and select `Your work`.\n![Your work](https://about.gitlab.com/images/blogimages/2023-07-10-your-work.png){: .shadow}\n4. Select `Workspaces`.\n5. Select `New workspace`.\n6. Select the project you forked or another project that has a `.devfile.yaml` file at the root of the repository. \n7. Select the [cluster agent](https://docs.gitlab.com/ee/user/workspace/#prerequisites) owned by the group the project belongs to.\n8. In `Time before automatic termination`, enter the number of hours until the workspace automatically terminates. This timeout is a safety measure to prevent a workspace from consuming excessive resources or running indefinitely. \n9. Select `Create workspace`. \n\n![create ws](https://about.gitlab.com/images/blogimages/create_workspace.png){: .shadow}\n\nThe workspace will be deployed to the cluster and might take a few minutes to start. To access the workspace, under Preview, select the workspace link.\n\n![ws list](https://about.gitlab.com/images/blogimages/workspaces_list.png){: .shadow}\n\n## Install dependencies and previewing your application in the workspace\nAfter creating your workspace, the [Web IDE using VS Code](https://docs.gitlab.com/ee/user/workspace/#web-ide) is injected into it, and the repository is cloned to the image. Consequently, you gain immediate access to your code and can commence working on it right away.\n\nYou can now open the terminal, install any missing dependencies, and start the application.\n\n![Terminal](https://about.gitlab.com/images/blogimages/ws-terminal.png){: .shadow}\n\n1. To open the terminal, from the left menu, select `Terminal`, `New Terminal`. \n2. 
Type `npm install` to install the dependencies listed in the [package.json](https://gitlab.com/gitlab-da/use-cases/remote-development/example-nodejs-express-app/-/blob/main/package.json) file.\n3. Type `npm start` to start the application.\n\nThe log will indicate that the application has started on port 3000.\n\n![log](https://about.gitlab.com/images/blogimages/server_log.png){: .shadow}\n\nYou can now access your application by opening the browser and using the workspace URL. Change the number before ‘workspace’ in the URL to the port number on which your application is listening (e.g., 3000). For example, if your workspace URL is `https://\u003Cprefix>-workspace-73241-25728545-rqvpjm.workspaces.gitlab.dev`, and your application is running on port 3000, update `\u003Cprefix>` to 3000 to access your application.\n\n## Make changes to the application and previewing the updated version\nIn the Web IDE, navigate to the `server.js` file, modify the text in line 9. \n\nAfterward, refresh the browser where your application is opened to see the applied changes. \n\n## Commit the change \n1. In the Web IDE click on the merge icon in the activity bar.\n2. Click the line with the `server.js` to view your change side by side.\n3. To stage your change, click the plus icon next to `server.js`.\n4. Type a commit message describing your change.\n5. Click Commit. \n6. Click Sync changes to push the commit to the GitLab server.\n\n  ![commit](https://about.gitlab.com/images/blogimages/commit-stage.png){: .shadow}\n\n## Explore the demo \nExplore further with this [click-through demo of workspaces](https://go.gitlab.com/qtu66q).\n\n## Try out workspaces\nRemote Development workspaces offer a convenient and efficient way to work on projects without the need for local development setups. 
They provide a streamlined workflow and enable developers to focus on writing code rather than dealing with complex environment setups.\n\nBy adopting workspaces, developers can collaborate effectively, improve productivity, and simplify the development process. \n\nGive workspaces a try and revolutionize your remote development experience today!\n\nCover image by \u003Ca href=\"https://unsplash.com/@pankajpatel?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Pankaj Patel\u003C/a> on \u003Ca href=\"https://unsplash.com/photos/_SgRNwAVNKw?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n{: .note}\n",[978,9,109],{"slug":6944,"featured":6,"template":686},"quick-start-guide-for-gitlab-workspaces","content:en-us:blog:quick-start-guide-for-gitlab-workspaces.yml","Quick Start Guide For Gitlab Workspaces","en-us/blog/quick-start-guide-for-gitlab-workspaces.yml","en-us/blog/quick-start-guide-for-gitlab-workspaces",{"_path":6950,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6951,"content":6956,"config":6961,"_id":6963,"_type":14,"title":6964,"_source":16,"_file":6965,"_stem":6966,"_extension":19},"/en-us/blog/quickly-onboarding-engineers-successfully",{"title":6952,"description":6953,"ogTitle":6952,"ogDescription":6953,"noIndex":6,"ogImage":6033,"ogUrl":6954,"ogSiteName":670,"ogType":671,"canonicalUrls":6954,"schema":6955},"How to quickly (and successfully) onboard engineers","It's a tough hiring market today. 
Here's how GitLab gets engineers onboard fast and sets them up for success.","https://about.gitlab.com/blog/quickly-onboarding-engineers-successfully","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to quickly (and successfully) onboard engineers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2022-07-21\",\n      }",{"title":6952,"description":6953,"authors":6957,"heroImage":6033,"date":6958,"body":6959,"category":791,"tags":6960},[6710],"2022-07-21","\n\nNo one ever said hiring was easy. As a matter of fact, talent hiring and retention are some of the hardest aspects to get right for any software company. \n\nAccording to [a recent article at Developer Pitstop](https://developerpitstop.com/how-long-do-software-engineers-stay-at-a-job/) the average engineer will only stay at a job for an average of two years before moving on, and this tenure is shrinking as time goes on. \n\nWhen we look at the average timeline for engineers in a new role we usually see something like:\n\n> - Learning and adaptation (3 / 6 months):\n>   Coming to grips with the new company, team, and their processes.\n> \n> - Creating value for the organization (6 / 12 months):\n>   Adding value to the business by becoming a functioning member of the team.\n> \n> - Becoming a role expert (6 / 18 months):\n>   Owning the role completely and helping to shape the direction of the team.\n\n## Software engineer onboarding\n\nAt GitLab we pride ourselves on an outstanding onboarding process to reduce the amount of time an engineer will spend in the `learning and adaptation` bracket and accelerate their evolution into the `creating value for the organization` bracket. 
We do this for two main reasons:\n\n- **Quicker integration**: We aim to have engineers ship production code in less than one week, and fully onboard them in less than three months.\n- **Reduce turnover**: Engineers who have an awesome onboarding experience tend to stay with the same company longer.\n\n**The bottom line is that with these benefits, investing in an amazing onboarding process gives you the highest ROI on your hiring initiatives.**\n\nSo, now that we know **why** we need to ensure we onboard quickly and correctly, let's talk about **how** we do it at GitLab. \n\n## Software engineer onboarding process: An overview\n\n- 💯 Before day one \n- 💥 It's all about the onboarding issue\n- 🥂 Pick the right onboarding buddy\n- 👌 Pair, pair, and more pairing\n- 🖐 All the coffee chats\n- 🤘 Tailor the experience to the role\n- 🚢 Ship some code in a week or less\n- 💬 Let's get (and give) some feedback\n\n![onboarding](https://about.gitlab.com/images/blogimages/onboarding.png){: .shadow}\n\n## 💯 Before day one\n\nThe best processes for onboarding software engineers start as soon as the candidate has officially accepted the offer. This is done in a few ways:\n\n- An onboarding issue is created with tasks for the hiring manager, their buddy, and People Experience (HR).\n- The hiring manager selects the right onboarding buddy for the engineer and communicates expectations (more on this later).\n- The engineer's accounts (Email, GitLab account, Okta, etc) are created and their hardware is shipped.\n- GitLab reaches out via email to let the candidate know what the onboarding process looks like.\n- The hiring manager reaches out to the engineer via email to set up a coffee chat on Day 1 as the initial process might seem overwhelming.\n\nFor us, the most important aspect is communication with the engineer to ensure they are set up for success. 
We provide them with access to their onboarding issue, helpful video guides for getting started, and a primer on how to navigate our [handbook like a pro](https://about.gitlab.com/handbook/people-group/general-onboarding/). The reason this is so important is that we know if we stop communicating with the engineer after signing, we are at risk of creating uncertainty, introducing inefficiency, or even losing them to another offer during that time.\n\n## 💥 It's all about the onboarding issue\n\nAt GitLab, our [onboarding issue](https://about.gitlab.com/handbook/people-group/general-onboarding/#onboarding-issue-template-links) is the most effective tool we have for successfully onboarding a new engineer quickly. Hiring managers use this issue almost exclusively, both for building momentum and for following our value of transparency. We use this issue, instead of Slack or email, to create a single source of truth for everyone during the process and to prevent fragmented communication. For anyone new at GitLab, the first few weeks can seem like a lot to get on top of, so the hiring manager wants to be mindful of opportunities to consolidate communication and reduce context switching. \n\nOur onboarding issues are confidential because they contain sensitive account information, but the templates of the issue are [public](https://about.gitlab.com/handbook/people-group/general-onboarding/#onboarding-issue-template-links) and they look something like this:\n\n```\n- Accounts and access\n- Day 1: Getting started: Accounts and paperwork\n- Day 2: Remote working and our values\n- Day 3: Security & compliance\n- Day 4: Social & benefits\n- Day 5: Git & push some code\n- Weeks 2 - 4: Explore\n- Job-specific tasks\n```\n\nAs a hiring manager, you want to ensure that you have fleshed out the `job specific tasks` ahead of time with things that are important for the specific role the engineer will be working in. 
This will generally include things like ensuring they have database access, pointing them to the working groups that will support their work, and letting them know the right Slack channels to support their development. \n\n## 🥂 Pick the right onboarding buddy\n\nThe advantages of the buddy system have been [well documented for years](https://www.pmi.org/learning/library/implementing-buddy-system-workplace-9376). At GitLab we lean heavily on the onboarding buddy [model](https://about.gitlab.com/handbook/people-group/general-onboarding/onboarding-buddies/) and rather than having multiple people support the new engineer, it will generally be the hiring manager and a single buddy. \n\nThe advantages of an onboarding buddy at GitLab are several:\n\n- **Domain expert**: An onboarding buddy knows the domain the new engineer is going to be working in. They have already written, reviewed, and merged code into production in the same way we want the new engineer to. They know the process, pitfalls, and gotchas of the domain. \n- **Single context / Accountabilibuddy**: A single onboarding buddy drastically reduces context switching and \"paralysis by analysis.\" They know they always have someone to ask and this creates a psychologically safe space for them. GitLab can often be a scary environment to navigate when you are new due to impostor syndrome and we want to curb that. \n- **GitLabisms**: At GitLab, we have code and then we have \"GitLabisms.\" These are things that are specific to GitLab, be it workflows or custom tooling. These are often more complicated to become familiar with than the code itself. The onboarding buddy should have experience with these already and be able to point the engineer in the right direction when they are stuck. \n- **Mentor**: Mentoring is one of the single best things an engineer can do to grow themselves and become more sure of their own skills. 
By being an onboarding buddy, they are given a growth opportunity to cover their own blindspots and upskill. \n\nAs a rule of thumb, the onboarding buddy should ideally be someone from the engineer's new team who is working in the same domain, i.e. a senior frontend engineer mentors a new intermediate frontend engineer, both of which are from the same team. While this rule is not set in stone, it is often less effective to have an onboarding buddy be cross-team due to a lack of domain expertise.\n\n## 👌 Pair, pair, and more pairing\n\nPairing when programming and when working on tasks is a very effective way to help new engineers build up their knowledge without needing to pore over documentation. \n\nIn general, we would recommend that the engineer pair with their onboarding buddy on their first few merge requests to get used to the workflow and pitfalls of working with the GitLab Development Kit. But this is not where it should stop. We encourage pairing across the board at GitLab either via open pairing sessions such as our Frontend Pairing office hours, having a manager pair with an engineer, or pairing with a stable counterpart such as your team's UX designer. \n\nWhen it comes to onboarding, pairing is helpful. We do this because we want to:\n\n- **Create psychological safety**: We all feel impostor syndrome. This is worse when you're new to a job and don't know the ecosystem yet. Regular pairing helps to undo that worry as you see people are just people and even staff/principal engineers forget the closing brace!\n- **Create relationships/network**: In an all-remote company, it becomes important to know who to reach out to in moments of need. Regular pairing helps to foster these relationships and creates a safety net with your peers. \n- **Demonstrate our values**: We believe in [CREDIT](https://handbook.gitlab.com/handbook/values/) at GitLab. Regular pairing supports all our core values and helps to encourage us to be mindful of them when working. 
\n- **Give and get real-time feedback**: When pairing, we can get real-time feedback on our process and how we're approaching solutions. This is extremely important for new engineers who might not be familiar with core GitLab concepts such as [iteration](https://handbook.gitlab.com/handbook/values/#iteration) (\"How can we break this down?\").\n\n## 🖐 All the coffee chats\n\nBeing distributed means we do communication differently at GitLab. One key to successfully onboarding a new engineer is to get them comfortable with our communication style. \n\nTo do this, we encourage regular [coffee chats](https://about.gitlab.com/company/culture/all-remote/informal-communication/#coffee-chats) and a culture of zero shame about it. \n\nEncourage your new hire to set up regular coffee chats with people across the company to help build rapport and become comfortable with GitLab as a whole. \n\nTo help empower new hires, have them ask the following question in their initial 10 - 15 chats:\n\n> What is the one thing I can do to be successful at GitLab?\n\n## 🤘 Tailor the experience to the role\n\nAs a hiring manager, you need to understand that people learn and grow in different ways. No single method will work for everyone and it is your job to ensure your new hire feels supported in how they want to learn. \n\nDuring the onboarding, observe your new hire and touch base with them in your weekly 1:1 for what they are and **are not** enjoying about the experience so far. Once you have this information, iterate on it and tailor their onboarding to include more of what they prefer. \n\nAsk constructive questions that can have actionable tasks each week to ensure a better process for them:\n\n> Do you want to pair more? Do you want more alone time? Are there particular areas you need more guidance in? Are there things I can do to better support you?\n\nYou should aim to strike a balance during their onboarding for a mixture of practical work and time dedicated to studying. 
Work with the direct report to establish the best balance for them as an individual. \n\n## 🚢 Ship some code in a week or less\n\nThis is arguably the most important aspect of successfully onboarding an engineer and setting them up for success. The sooner they can push code to production, the sooner they can begin to refine their skills and work effectively with the team. \n\nThe best software companies in the world set a timeline of shipping code in a week. At GitLab, this is not a hard-and-fast rule, but in the **Create** stage is what we strive for. \n\nTo ensure an engineer can ship code within a week, we need to ensure they are supported in a few ways:\n\n- **Tooling**: At GitLab we have a fantastic [local development kit](https://gitlab.com/gitlab-org/gitlab-development-kit) which sets up an engineer to begin delivering code. We support this kit heavily as a first-class citizen and are constantly refining the tooling and [docs](https://gitlab.com/gitlab-org/gitlab-development-kit/-/tree/main/doc) to ensure everyone can contribute. For a new hire, consider having their first pairing session to be setting up their GDK – this will get them one step closer to shipping quality code. \n- **Dev process**: At GitLab, we always strive to [break down work into its smallest deliverable](https://about.gitlab.com/handbook/product/product-principles/#the-minimal-viable-change-mvc) that can be picked up by an engineer without deep contextual understanding. We do this to support the open source community as much as our own engineers. \n\n## 💬 Let's get some feedback\n\nAs a hiring manager, you want to ensure you build a stable feedback loop into your processes and this includes onboarding. During your 1:1s you should include a weekly feedback cycle for both **you** and your direct report. 
\n\nThese feedback cycles should take the form of:\n\n- **Appreciation (Collaboration / Results / Diversity / Iteration / Transparency)**: A moment of appreciation for something positive that is highlighted inline with our values. \n- **Coaching (Collaboration / Results / Diversity / Iteration / Transparency)**: A growth opportunity that is highlighted inline with our values. \n\nThese weekly feedback loops allow the engineer to highlight things that could be done better in both the context of the onboarding and their day-to-day experience. \n\nLastly, it is optional but encouraged to hold an onboarding retrospective when the initial onboarding issue is closed with the following points to talk through:\n\n- What went well?\n- What didn't go so well? \n- What could be improved? \n- Action items\n\n## 💾 TL;DR \n\nThe most successful software companies have a solidified onboarding process and continue to expand on it, setting up both the company and engineers for long-term success. The above methods are how we do it at GitLab. \n\n## 💻 Remote development and the developer experience\n\nAt GitLab we have recently been hiring for our [Remote Development effort](https://about.gitlab.com/direction/create/ide/remote_development/) and many of these items are in play with the engineers we are bringing into the company. We want to improve these processes to make onboarding even easier, mitigating the need for even setting up a specific local development toolchain to be able to ship production code. 
\n\nIf you think you might be interested in a role at GitLab working on Remote Development, check out our open listings [here](https://boards.greenhouse.io/gitlab/jobs/6201785002).\n\nRead more about [leading engineering teams](/blog/cadence-is-everything-10x-engineering-organizations-for-10x-engineers/).\n",[9,1158,749],{"slug":6962,"featured":6,"template":686},"quickly-onboarding-engineers-successfully","content:en-us:blog:quickly-onboarding-engineers-successfully.yml","Quickly Onboarding Engineers Successfully","en-us/blog/quickly-onboarding-engineers-successfully.yml","en-us/blog/quickly-onboarding-engineers-successfully",{"_path":6968,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6969,"content":6975,"config":6981,"_id":6983,"_type":14,"title":6984,"_source":16,"_file":6985,"_stem":6986,"_extension":19},"/en-us/blog/r2devops-open-source-hub-cicd",{"title":6970,"description":6971,"ogTitle":6970,"ogDescription":6971,"noIndex":6,"ogImage":6972,"ogUrl":6973,"ogSiteName":670,"ogType":671,"canonicalUrls":6973,"schema":6974},"How to create a hub of GitLab CI/CD jobs with R2Devops","Here's how R2Devops and GitLab can work together to streamline CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682395/Blog/Hero%20Images/r2devops1.png","https://about.gitlab.com/blog/r2devops-open-source-hub-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create a hub of GitLab CI/CD jobs with R2Devops\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Salerno\"}],\n        \"datePublished\": \"2022-07-27\",\n      }",{"title":6970,"description":6971,"authors":6976,"heroImage":6972,"date":6978,"body":6979,"category":726,"tags":6980},[6977],"Sandra Salerno","2022-07-27","\n\nCI/CD has changed our development processes, but it hasn’t simplified them in every aspect. 
The amount of knowledge necessary to implement and maintain your first CI/CD pipelines is huge, and the time you need to invest in it is consequential. Partnering with GitLab, R2Devops aims to simplify CI/CD onboarding by creating a hub of CI/CD jobs. In this blog post I'll show you how to use R2DevOps with GitLab to add jobs to an open source hub.\n\n## A collaborative hub of open source jobs\n\nCollaboration is core to our development processes. On a daily basis, we use open source software and code and ask our teammates for review. Working together to achieve common goals helps us to develop better products and improve continuously. With R2Devops, you’ll find a [collaborative library of open source CI/CD jobs](https://r2devops.io/_/jobs). \n\nYou can save a lot of time by using jobs from an open source library. You won’t have to write your pipeline from scratch for every new project, and you can focus on what you like doing, which is coding.\n\nAnd, of course, working together is working smarter. R2Devops empowers collaboration by allowing developers to add their own jobs into the library directly from their GitLab account. \n\n## How to add a job in R2Devops\n\n![Adding a job](https://about.gitlab.com/images/blogimages/r2devops2.gif){: .shadow.small.left}\n\nLink your GitLab account to [R2Devops](https://r2devops.io), fill in the URL of your repository, the path of your job, and give it a name. Once you click on import, our crawler will check three files:\n\n1.) the jobname.yml/jobname.yaml \n\n2.) the changelog.md\n\n3.) the readme.md. \n\nThe crawler process is explained in detail [in our documentation](https://docs.r2devops.io/crawler/). In short, without a jobname.yml file, R2 won’t be able to import your job. 
The changelog.md allows R2 to check your job’s versions, and the readme.md is used to build the documentation for each version of your job.\n\nEt voilà, anyone can see your job in R2Devops and easily use it in their pipeline.\n\nOnce your job is in R2Devops, you can add information such as the license, description, and specify labels. This helps other users understand what your job can be used for. That data and the job’s code appears in the documentation. 👇\n\n![Data in the documentation](https://about.gitlab.com/images/blogimages/r2devops3.png){: .shadow.small.left}\n\n### Include any jobs in one line with GitLab Include keyword feature\n\n[In January 2019, GitLab released a feature](https://about.gitlab.com/releases/2019/01/22/gitlab-11-7-released/) that simplifies the CI/CD keyword [Include](https://docs.gitlab.com/ee/ci/yaml/index.html#include) process. Rather than copying the code of a job every time you need to create a new pipeline, you can instead indicate to your pipeline where the source is located.\n\nFor example, this:\n\n![pre-include](https://about.gitlab.com/images/blogimages/r2devops4.png){: .shadow.small.left}\n\ncan become the below:\n\n![post-include](https://about.gitlab.com/images/blogimages/r2devops5.png){: .shadow.small.left}\n\nThis feature is used in R2Devops. Every resource added in the library gets its own _Include_ link, so anyone can implement it in one line in their CI/CD. It also means that the file you are using is located in a unique place. Once you update it, you only have to update the include link by modifying the version of the job you want to use. You don’t have to update the whole code in every pipeline you own.\n\n### Customize the job you need using GitLab variables\n\nMost of R2Devops’ jobs are plug and play, meaning you can add the _Include_ link of the job in your pipeline, launch it, and it will work. We understand every project is different and has its own requirements, which is why we defined variables for each job. 
\n\n[GitLab CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) and YAML overrides allow you to customize the jobs and make them fit your project easily. \n\n![How to customize](https://about.gitlab.com/images/blogimages/r2devops6.png){: .shadow.small.left}\n\nWe have included two jobs from the hub as examples: [python_test](https://r2devops.io/_/r2devops-bot/python_test) code and [sls-scan](https://r2devops.io/_/r2devops-bot/sls_scan). Using the variables defined in the documentation for each job, you can personalize the behavior of these jobs to fit your project requirements.\n\n## Matching GitLab's values of open source and transparency\n\nR2Devops joined the [GitLab Alliance Partner Program](/handbook/alliances/) in March. Both solutions share the same goal – to simplify developer lives by improving development processes. If you want to take part in the development of the open source CI/CD community of GitLab or give feedback on the solution, please [join the R2Devops community on Discord.](https://discord.r2devops.io?utm_medium=website&utm_source=r2devops&utm_campaign=button https://discord.r2devops.io?utm_medium=gitlab&utm_source=blog&utm_campaign=articleR2Devops)\n\nCover image by [Duy Pham](https://unsplash.com/@miinyuii) on [Unsplash](https://unsplash.com)\n{: .note}\n\n\n",[109,9,282],{"slug":6982,"featured":6,"template":686},"r2devops-open-source-hub-cicd","content:en-us:blog:r2devops-open-source-hub-cicd.yml","R2devops Open Source Hub Cicd","en-us/blog/r2devops-open-source-hub-cicd.yml","en-us/blog/r2devops-open-source-hub-cicd",{"_path":6988,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":6989,"content":6994,"config":6999,"_id":7001,"_type":14,"title":7002,"_source":16,"_file":7003,"_stem":7004,"_extension":19},"/en-us/blog/reduce-cycle-time-digital-transformation",{"title":6990,"description":6991,"ogTitle":6990,"ogDescription":6991,"noIndex":6,"ogImage":1193,"ogUrl":6992,"ogSiteName":670,"ogType":671,"canonicalUrls":6992,"schema":6993},"How 
to reduce cycle time when faced with the digital transformation","With every industry facing change at an accelerated pace, how do you quickly deliver value to customers?","https://about.gitlab.com/blog/reduce-cycle-time-digital-transformation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to reduce cycle time when faced with the digital transformation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jeremiah\"}],\n        \"datePublished\": \"2019-03-19\",\n      }",{"title":6990,"description":6991,"authors":6995,"heroImage":1193,"date":6996,"body":6997,"category":679,"tags":6998},[1198],"2019-03-19","\n\nOver the past several years, the “hot topic” in the tech world has been digital\ntransformation, the act of accelerating software innovation to deliver value to\ncustomers at high speed. Technology and innovation create disruptions across every\nindustry – from retail to financial services – meaning everyone faces change at\na faster pace. [A recent study by the\nWorld Economic Forum](http://reports.weforum.org/digital-transformation/) found\nthat “digital transformation” impacts almost every sector and offers critical\nexamples of how mobile devices, internet of things, machine learning, and big\ndata collectively reshape our future. If you're an IT leader, you may ask yourself,\n“What is fast and how does my team go faster?”\n\n## What _is_ fast?\n\nThe first step in preparing for the digital transformation is to look at how you\nmeasure speed: cycle time.\n\niSixSigma has a great\n[definition of cycle time](https://www.isixsigma.com/dictionary/cycle-time):\n“The total time from the beginning to the end of your process, as defined by you\nand your customer. 
Cycle time includes process time, during which a unit is acted\nupon to bring it closer to an output, and delay time, during which a unit of work\nis spent waiting to take the next action.” In a nutshell, cycle time is the total\nelapsed time to move a unit of work from the beginning to the end of a physical process.\n\n>In a nutshell, cycle time is the total\nelapsed time to move a unit of work from the beginning to the end of a physical process.\n\nIt’s important to note that cycle time is not the same as\n[lead time](https://www.linkedin.com/pulse/what-lead-time-why-important-how-do-you-reduce-roland-lester/).\nCycle time tells you how efficient your development and delivery processes are,\nand lead time tells you how long customers wait for a new feature. If you have a\nlot of ideas in your backlog, you could have a short cycle time, but a long lead\ntime due to the backlog. However, if you can improve your DevOps lifecycle to\nachieve a fast cycle time, you can then rapidly respond to new business priorities.\n\n## How does your team go faster?\n\nSo, now you know how to measure speed, how do you reduce your cycle time, let\nalone your lead time?\n\n### Take stock first\n\nIt starts with understanding where your current delivery process has problems –\nwhere you’re creating\n[bottlenecks](https://about.gitlab.com/solutions/remove-bottlenecks/index.html),\nrework, or merely waiting for someone to do something. The objective of\n[value stream management](/solutions/value-stream-management/) is to define,\nmeasure, and improve the flow of value to your customers. In the case of IT and\napplication delivery, value stream management starts with your backlog of feature\nrequests and ends with the delivery of the features to your users.\n\n### Here’s a recipe to reduce cycle time:\n\n1. Measure your cycle time and lead time (cycle time is your process and lead time is what customers see).\n1. 
Identify the bottlenecks in your value stream (those things that stretch your cycle time).\n1. Improve your processes, automate, and streamline your value stream.\n1. Repeat step 1.\n\nIf you’re concerned about how the digital transformation will impact your business, I\nhighly recommend the\n[Digital Transformation Initiative Executive Summary](http://reports.weforum.org/digital-transformation/wp-content/blogs.dir/94/mp/files/pages/files/dti-executive-summary-20180510.pdf),\na fantastic report that’ll provide you with a comprehensive understanding of how\nit will create business value. As you improve your cycle time, you’ll be able to\nlower your lead time, because your delivery processes will be faster and more\nefficient. The key is to measure, understand, and improve your process.\n\nAre you ready to tackle the digital transformation? [Just commit.](/blog/strategies-to-reduce-cycle-times/)\n",[683,9],{"slug":7000,"featured":6,"template":686},"reduce-cycle-time-digital-transformation","content:en-us:blog:reduce-cycle-time-digital-transformation.yml","Reduce Cycle Time Digital Transformation","en-us/blog/reduce-cycle-time-digital-transformation.yml","en-us/blog/reduce-cycle-time-digital-transformation",{"_path":7006,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7007,"content":7013,"config":7018,"_id":7020,"_type":14,"title":7021,"_source":16,"_file":7022,"_stem":7023,"_extension":19},"/en-us/blog/reduce-it-costs",{"title":7008,"description":7009,"ogTitle":7008,"ogDescription":7009,"noIndex":6,"ogImage":7010,"ogUrl":7011,"ogSiteName":670,"ogType":671,"canonicalUrls":7011,"schema":7012},"How to reduce IT costs","Four ways organizations can spend less on IT and more on innovation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680558/Blog/Hero%20Images/reduce-it-costs.jpg","https://about.gitlab.com/blog/reduce-it-costs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        
\"headline\": \"How to reduce IT costs\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-04-11\",\n      }",{"title":7008,"description":7009,"authors":7014,"heroImage":7010,"date":7015,"body":7016,"category":679,"tags":7017},[788],"2019-04-11","\n\nEfficient organizations do _more_ with _less_ – it's just simple math, really.\nBut even as teams try to stay lean and agile, some IT budgets are anything but. In a [recent survey that analyzed IT spending](https://searchcio.techtarget.com/magazineContent/How-Company-Size-Relates-to-IT-Spending)\nbased on company size, small companies spend on average 6.9 percent of their revenue on IT\n(enterprise spending is usually around 3 percent). Out of this IT spending, [more than 70 percent goes toward maintenance](https://phoenixnap.com/blog/it-cost-reduction-strategy) – just keeping things running.\n\nIT cost reduction could help fund the innovations all companies need to stay competitive,\nbut therein lies the problem. How do you prioritize what stays and what goes when _everything_ feels important?\nReducing IT costs doesn't happen in a vacuum – teams across the organization depend on these decisions.\n\n## Where do I start?\n\n### Reduce on-premise IT\n\n[On-premise IT has several costs](https://ianmartin.com/10-strategies-top-cios-use-reduce-costs/):\nthe servers themselves, power and cooling, staff to maintain them, software licenses, and the\nadditional leased space needed to house it all. [Virtualization hosts multiple virtual instances](https://www.bmc.com/blogs/6-ways-reduce-ongoing-maintenance-management-costs/)\n(Virtual Machines, VMs) of an operating environment on the same machine, reducing the\nnumber of physical servers needed. 
Virtual environments offer more flexibility, containers\nthat run independently, and fewer costs over the long term.\nTaking on cloud-based architecture embodies doing more with less.\n\n### Evaluate toolchain-management costs\n\nThose that spend more on their IT needs aren't typically the top performers – they just have more stuff.\nEvery application and plugin creates another potential point of failure, and added complexity\nalmost always spoils the efficiency party. Those in charge of IT have to keep up with more\nmaintenance, more patches, more logins, which in turn leads to more IT staff and even more\ncomplexity. Look at your toolchain – plugins, applications, and licenses – and evaluate the costs.\nSeveral \"inexpensive\" licenses that look harmless in micro add up quickly in macro.\nThis doesn't even factor in the ongoing costs (upgrades, management, additional staff).\n\nHow much are you paying for your toolchain? We created this handy calculator that shows the\nannual cost for 100 users using some of the most common DevOps tools.\n\n[How much is your toolchain?](/calculator/roi/)\n{: .alert .alert-gitlab-purple .text-center}\n\n### Follow best practices to reduce downtime\n\nDowntime is every team's tech nightmare. 
It's estimated that [the cost of downtime for an\naverage-sized company is over $7,000 per minute](https://www.datafoundry.com/blog/6-cost-reduction-strategies-enterprise-IT)\n(yikes), and it can have far-reaching implications: worse customer relationships, employee turnover,\nand it can scare off investors, just to name a few.\nWhen facing a budget crunch, it might seem minor to skimp on a few steps when you're confident\nof the outcomes, but doing it right the first time saves much more in the long run.\nFollowing best practices like user testing and code reviews takes time up front, but they lower the chance of costly mistakes.\n\n### Modernize applications and migrate to lower-cost infrastructures\n\nConsolidating tools and an aggressive approach to application modernization are going to\nbe the greatest opportunities for enterprises to save budget dollars.\nA recent survey of top enterprise architects found that [36 percent cited application rationalization as the primary initiative they're working on](https://www.itbusinessedge.com/slideshows/show.aspx?c=93453).\nAnother survey of 250 senior IT executives found that 58 percent of them said\n[the best way to cut IT costs was to modernize or migrate existing applications to a lower-cost IT infrastructure](https://www.itbusinessedge.com/cm/blogs/vizard/application-modernization-tops-it-agenda/?cs=41480).\n\nReducing IT costs is essential for scale and funding innovations that keep organizations competitive,\nbut making the right cuts requires prioritization. For savings in the long term keep these four objectives in mind:\n\n*   Reduce on-premise IT.\n*   Evaluate toolchain-management costs.\n*   Follow best practices to reduce downtime.\n*   Modernize applications and migrate to lower-cost infrastructures.\n\nIT cost reduction directly correlates to increased revenue, but it isn't always easy. 
It just requires a little commitment.\n\n[Just commit.](/blog/application-modernization-best-practices/)\n{: .alert .alert-gitlab-purple .text-center}\n\nCover image by [Thomas Jensen](https://unsplash.com/photos/qTEj-KMMq_Q?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/computer-servers?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[1041,9],{"slug":7019,"featured":6,"template":686},"reduce-it-costs","content:en-us:blog:reduce-it-costs.yml","Reduce It Costs","en-us/blog/reduce-it-costs.yml","en-us/blog/reduce-it-costs",{"_path":7025,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7026,"content":7032,"config":7037,"_id":7039,"_type":14,"title":7040,"_source":16,"_file":7041,"_stem":7042,"_extension":19},"/en-us/blog/remote-work-facilitates-devops",{"title":7027,"description":7028,"ogTitle":7027,"ogDescription":7028,"noIndex":6,"ogImage":7029,"ogUrl":7030,"ogSiteName":670,"ogType":671,"canonicalUrls":7030,"schema":7031},"People agree that remote work in DevOps creates a stronger DevOps culture","What makes remote work more conducive to DevOps adoption? Here's a look at one of the findings of our 2018 Global Developer Report.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680149/Blog/Hero%20Images/devopsremotework.jpg","https://about.gitlab.com/blog/remote-work-facilitates-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"People agree that remote work in DevOps creates a stronger DevOps culture\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-04-17\",\n      }",{"title":7027,"description":7028,"authors":7033,"heroImage":7029,"date":7034,"body":7035,"category":679,"tags":7036},[702],"2018-04-17","\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! 
Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nAccording to our [2018 Global Developer Report](/developer-survey/previous/2018/), remote teams tend to trend higher in visibility and DevOps satisfaction compared to in-office teams, suggesting that a remote workplace culture is more conducive to DevOps adoption.\n\n![The differences between remote and in-office teams](https://about.gitlab.com/images/blogimages/devopsremotestats.png){: .shadow.medium.center}\n\nAs a [remote-only](/company/culture/all-remote/) company, this finding naturally piqued our interest. We started thinking about the traits of a remote team and how these characteristics set up operations and development teams for success.\n\n## The challenges of DevOps adoption\n\nOne of the greatest difficulties an organization faces when adopting a DevOps model is a [resistance to culture change](https://www.cio.com/article/3235726/application-development/5-hurdles-to-adopting-devops.html). Because DevOps requires teams to collaborate and communicate in new ways (and at an increasing frequency), historically siloed teams may have trouble adjusting. This type of radical shift in culture can be too difficult for a team to handle and may result in an increase in friction and frustration.\n\nHow can teams that have traditionally worked alongside each other – [but not together](https://www.wired.com/insights/2015/03/culture-war-struggle-adopt-devops/) – suddenly adopt a model that encourages them to contribute to a single conversation across every stage?\n\n## Remote work paves the way to a smoother transition\n\nIn our survey we learned that [20 percent of respondents](/developer-survey/previous/2018/) say most or all of their development team works remotely. Every remote worker knows the importance of [communicating effectively](/blog/remote-communication/) and frequently to ensure that others are aware of decisions and progress. 
Without the convenience of physical proximity, working remotely requires a commitment to open discussion and an understanding that team members must be able to easily view projects and receive updates. Furthermore, remote teams use tools to work concurrently, decreasing the challenges of siloed workflows.\n\nAn effective remote culture embraces:\n\n- efficiency\n- collaboration\n- visibility\n\nWhen operations and development teams already have a culture founded on trust and transparency, they can more easily adopt a model that fosters cross-functional communication and workflows.\n\nRemote teams are already accustomed to transparency, collaboration, and visibility, making a DevOps adoption a seamless transition. Because teams must document discussion conclusions, an inherent benefit of working remotely is complete real-time visibility of all projects and activities, an advantage of the DevOps model.\n\n## How can in-office teams ease DevOps adoption?\n\nWhile a remote workplace culture appears to create a solid foundation upon which a DevOps model can thrive, we concede that remote teams can still encounter challenges to adoption. Poor communication, internal conflict, and a lack of defined processes can hinder any team. However, there are insights that in-office teams can gain from these findings. Because culture is the underpinning of successful DevOps adoption, in-office teams can ease challenges by encouraging teams to work concurrently and by transparently documenting conversations and decisions. Furthermore, a shift towards empathy can help teams gain respect for the work that others accomplish, a change that can increase collaboration and decrease friction.\n\nBy creating a collaborative culture, an organization can facilitate a smoother [transition to a DevOps model](/blog/a-snapshot-of-modern-devops-practices-today/).\n\nDoes your development team work remotely? Let’s chat about DevOps and remote working! 
Tweet us [@gitlab](https://twitter.com/gitlab).\n\n[Cover image](https://www.pexels.com/photo/high-angle-view-of-workplace-306533/) licensed\nunder [CC X](https://www.pexels.com/photo-license/)\n{: .note}\n",[3350,9,681],{"slug":7038,"featured":6,"template":686},"remote-work-facilitates-devops","content:en-us:blog:remote-work-facilitates-devops.yml","Remote Work Facilitates Devops","en-us/blog/remote-work-facilitates-devops.yml","en-us/blog/remote-work-facilitates-devops",{"_path":7044,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7045,"content":7051,"config":7057,"_id":7059,"_type":14,"title":7060,"_source":16,"_file":7061,"_stem":7062,"_extension":19},"/en-us/blog/reviewer-roulette-one-year-on",{"title":7046,"description":7047,"ogTitle":7046,"ogDescription":7047,"noIndex":6,"ogImage":7048,"ogUrl":7049,"ogSiteName":670,"ogType":671,"canonicalUrls":7049,"schema":7050},"Reviewer Roulette: (Just about) one year on","Learn how Reviewer Roulette has evolved at GitLab over the last year.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672195/Blog/Hero%20Images/play-reviewer-roulette.jpg","https://about.gitlab.com/blog/reviewer-roulette-one-year-on","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Reviewer Roulette: (Just about) one year on\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nick Thomas\"}],\n        \"datePublished\": \"2019-10-23\",\n      }",{"title":7046,"description":7047,"authors":7052,"heroImage":7048,"date":7054,"body":7055,"category":791,"tags":7056},[7053],"Nick Thomas","2019-10-23","\nJust over a year ago, [Dennis Tang](/company/team/#dennis)\nintroduced us to [Reviewer Roulette](/blog/play-reviewer-roulette/).\nThis was a shiny new tool designed to help us to find reviewers for our code.\nAt the time, our engineering department had around 150 people in it. 
At GitLab,\n[all our engineers are reviewers](/handbook/engineering/workflow/code-review/#reviewer),\nbut reviews were being unevenly distributed across them.\n\nA year on, and with more than 380 people in engineering available to review,\nwe're still using a form of Reviewer Roulette – but its implementation, and how\nwe interact with it, has changed significantly. So, what's changed, and what's\nstayed the same?\n\n## The good\n\nFirst off, roulette works really well. Code reviews can be time-consuming, and\nthey're a major part of quality control at GitLab, so it's crucial that we\ndistribute the load – research shows that [review quality nosedives](https://smartbear.com/learn/code-review/best-practices-for-peer-code-review/)\nif you spend too much time doing it. It's even more\nimportant for our maintainers. We try to maintain a ratio of engineers to maintainers of around\n4:1, but if half of the reviews go to a quarter of the maintainers, some will\nexperience it as 6:1, while others will experience it as 2:1.\n\nAlso, people could become familiar with certain reviewers and maintainers and\nhabitually assign their work to the same people. This means that people who had\nbeen maintainers for longer tended to get more reviews. Without the\nrandomization effect of Reviewer Roulette, this led to the creation of knowledge\nsilos, where knowledge about a particular subject would be concentrated in a few\nindividuals, rather than being spread across the organization.\n\nRoulette solved this for us with almost no cognitive load, and could scale\neffortlessly as our engineering team expands significantly. Sometimes, I first\nlearned someone new had joined the company through a review suggestion. The\nnumber and type of reviews a merge request needed was also increasing – I might\nneed to find a reviewer and maintainer for frontend, backend, QA, database,\ndocumentation, and UX concerns before merging. 
It's a lot to keep track of\nmanually!\n\n## The bad\n\nDespite the advantages of Reviewer Roulette, I used it inconsistently after a\nfew months, and never actually contributed any improvements to the code. The\nintegration with Slack didn't fit my workflow very well because a chat channel\nis the last place I want to be when working on code! I like to treat Slack as\nthe [informal, asynchronous](/handbook/communication/#slack) communication\nchannel it is designed to be, but it is too easy to be sidetracked by ongoing\nconversations when popping in to get a reviewer recommendation.\n\nThen, we began running into deployment problems, and sometimes Reviewer Roulette\njust wasn't available at all. It only took a few failed attempts before I fell\nout of the habit of trying to use it, and we never did get around to making the\ndeployment work with Auto DevOps.\n\nIt turns out that I wasn't the only one having trouble with this iteration of Reviewer Roulette – we found\nthat backend reviews were [very unevenly distributed](https://gitlab.com/gitlab-org/gitlab-foss/issues/53119#note_111796691). Reviewer Roulette wasn't being used widely enough across GitLab for us to experience\nall the benefits, and as we geared up to add many more maintainers, fixing\nthis tool became very important.\n\n## The fix\n\nIn the interim, staff backend engineer on Delivery, [Yorick Peterse](/company/team/#yorickpeterse), introduced\n[Danger bot](https://github.com/danger/danger) into GitLab's CI pipeline and\nused it to enforce a fine set of coding standards that we couldn't quite express\nwith Rubocop.\n\nThe new bot would leave polite messages on our MRs, asking us to write\n[better commit messages](https://docs.gitlab.com/ee/development/contributing/merge_request_workflow.html#commit-messages-guidelines),\nor to seek database review if we'd changed any files in `db/`. 
That last part got me\nthinking: Why couldn't the Danger bot pick a potential database reviewer for us at the same\ntime? What was stopping it from detecting backend, frontend, or documentation\nchanges, and using Reviewer Roulette to choose reviewers and maintainers right there in\nthe merge request?\n\n[Very little, it turned out](https://gitlab.com/gitlab-org/gitlab/merge_requests/13506#note_175449376):\n\n![Reviewer Roulette in Action](https://about.gitlab.com/images/blogimages/roulette-review.jpg)\n\nBy making Reviewer Roulette happen automatically in the merge request itself, we\nremoved all the barriers that were preventing us from using the tool. I no longer had to be\non Slack to find a reviewer, instead the list was right there in the merge request as\nI went to change the assignee. Danger was guaranteed to run on every pipeline –\nthere were no deployments or environments to worry about, and if it broke,\nfixing it was automatically [high priority](/handbook/engineering/workflow/#broken-master).\n\nContributing changes also became much easier – the code was right there in the\nGitLab repository, and changes took effect immediately (again, no deployments!).\n\n## What's next?\n\nThe ChatOps version of Reviewer Roulette needed access to GitLab's Slack\nworkspace to use and so it wasn't available to most of our community contributors\nbeyond the [core team](/handbook/marketing/developer-relations/core-team/). Moving Reviewer Roulette to Danger doesn't really solve this\nproblem – it doesn't work well on forks of the `gitlab-org/gitlab` project so\ncommunity contributors don't benefit. 
This problem is something I'd really\nlike to fix in the future, not least because I work on a fork of GitLab\nday-to-day as well.\n\nDanger is a good tool but it does have [some limitations](https://docs.gitlab.com/ee/development/dangerbot.html) –\nin particular, [`danger local`](https://danger.systems/guides/troubleshooting.html#i-want-to-work-locally-on-my-dangerfile)\ndoesn't work for GitLab. This slows down development, since you have to commit\nand push changes to your merge request before you can see the effects.\n\nAnother big problem is that this most recent iteration of Reviewer Roulette only\nworks for the `gitlab` project. None of our satellite projects - `gitaly`,\n`gitlab-workhorse`, `gitlab-pages`, `gitlab-runner`, etc. – can use this\nversion of Reviewer Roulette. Similarly, [users of GitLab haven't\nbenefited from the work we've been doing on Roulette](https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/24938#note_141874188).\nIdeally, we would have built this as a feature within GitLab itself, so everyone\ncould benefit from the tool.\n\nBy building Reviewer Roulette in Danger we've been able to protype and rapidly iterate\nto a solution that is working very well for the `gitlab` project. The next steps\nare to turn Reviewer Roulette [into a feature](https://gitlab.com/groups/gitlab-org/-/epics/1823) that all users of GitLab can benefit from, perhaps by leveraging the [CODEOWNERS file](https://gitlab.com/gitlab-org/gitlab/issues/12137).\n\nDo you have any ideas on how we can better integrate Reviewer Roulette into GitLab? 
Let us know by commenting [in the epic](https://gitlab.com/groups/gitlab-org/-/epics/1823)\nor by opening a new issue!\n\n[Cover photo](https://unsplash.com/photos/w6OniVDCfn0) by Krissia Cruz on [Unsplash](https://unsplash.com/search/photos/roulette).\n{: .note}\n",[771,683,9],{"slug":7058,"featured":6,"template":686},"reviewer-roulette-one-year-on","content:en-us:blog:reviewer-roulette-one-year-on.yml","Reviewer Roulette One Year On","en-us/blog/reviewer-roulette-one-year-on.yml","en-us/blog/reviewer-roulette-one-year-on",{"_path":7064,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7065,"content":7071,"config":7076,"_id":7078,"_type":14,"title":7079,"_source":16,"_file":7080,"_stem":7081,"_extension":19},"/en-us/blog/revisiting-the-variables-management-workflow",{"title":7066,"description":7067,"ogTitle":7066,"ogDescription":7067,"noIndex":6,"ogImage":7068,"ogUrl":7069,"ogSiteName":670,"ogType":671,"canonicalUrls":7069,"schema":7070},"Revisiting the variables management workflow","Our users helped us identify the hurdles in the variables management experience and we used those insights to guide improvements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098484/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_618473457_nd5Dr8kfGdrlTWLOPmDjb_1750098483284.jpg","https://about.gitlab.com/blog/revisiting-the-variables-management-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Revisiting the variables management workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2024-02-26\",\n      }",{"title":7066,"description":7067,"authors":7072,"heroImage":7068,"date":7073,"body":7074,"category":791,"tags":7075},[6396],"2024-02-26","CI/CD variables play a vital role in building and maintaining CI/CD pipelines and platforms. 
They are an essential part of the majority of developers’ workflows, serving a range of purposes from storing reusable information to maintaining data integrity. Given their significance, we made enhancing workflows related to CI/CD variables a priority.\nRecently, we conducted interviews with users representing different [personas](https://handbook.gitlab.com/handbook/product/personas/#list-of-user-personas) related to software development, working in teams with different structural and cultural dynamics. Our aim was to gain insights into the challenges they encounter when using and managing CI/CD variables within GitLab. The feedback helped us gain valuable perspective, guiding us toward [necessary improvements](https://gitlab.com/gitlab-org/gitlab/-/issues/418331) in these workflows. Some of the notable changes are highlighted in this blog.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/gdL2cEp3kw0?si=aNmhofDU3DsnofiP\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Better management\n\n![variables management - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098505/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098504762.png)\n\nEffective decision-making regarding the addition, modification, or removal of CI/CD variables hinges on understanding their purpose within a project or group. Lacking visibility into a variable's purpose can complicate these decisions. To address this challenge, we've introduced an enhancement to the variable creation process that will allow users to provide a description detailing the usage and context of a variable, reducing reliance on memory. This description will be displayed in the list, along with the other attributes of the variable. 
\n\n## Seamless task continuity\n\n![variables management - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098505/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098504763.png)\n\nEfficiency is paramount in software development as it allows developers to make time to focus on qualitative aspects of their work. We have changed the variable creation workflow to facilitate consecutive addition or editing of multiple variables to boost efficiency. Improved, clear notifications and contextual error messages ensure users can perform tasks without the need to repeatedly open separate forms.\n\n## Enhanced error prevention\n\n![variables management - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098505/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098504764.png)\n\nHow the error messages are presented and made accessible in a workflow determines their effectiveness in error resolution. We revisited the different error states users are likely to encounter during variable creation and editing workflow and identified the improvement opportunities ranging from adding new validations and help-texts to enhancing existing error-handling states.\n\n## Share your feedback\nWe believe in taking an iterative approach to better the product. We used insights from the recent user research and our best judgment when deciding on the changes, but there’s always room for improvement. Your feedback from your experience of using the changed UI for performing the tasks in your everyday work will help us understand what’s working and what isn’t, and, therefore, decide on future iterations. 
Please head to our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/441177) to share your thoughts and suggestions on the changes made.\n\n## What’s next?\nAs we work on making the existing variables workflow more usable, we’re also making progress on the [GitLab Secret Manager](https://about.gitlab.com/direction/govern/pipeline_security/secrets_management/#overview) to provide users with a more secure method for enabling GitLab, or a component built within GitLab, to connect to other systems.\n\nThere’s an ongoing effort to [improve the variables table layout to clearly represent the visual hierarchy](https://gitlab.com/gitlab-org/gitlab/-/issues/403176) between group and project variables and enhancing the [audit history for CI variables](https://gitlab.com/gitlab-org/gitlab/-/issues/416148) to provide better visibility into activities related to variables.\n\n## Read more about our UI improvements\n- [How we overhauled GitLab navigation](https://about.gitlab.com/blog/navigation-research-blog-post/)\n- [Beautifying our UI: Giving GitLab build features a fresh look](https://about.gitlab.com/blog/beautifying-of-our-ui/)\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[109,3644,2243,9],{"slug":7077,"featured":91,"template":686},"revisiting-the-variables-management-workflow","content:en-us:blog:revisiting-the-variables-management-workflow.yml","Revisiting The Variables Management Workflow","en-us/blog/revisiting-the-variables-management-workflow.yml","en-us/blog/revisiting-the-variables-management-workflow",{"_path":7083,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7084,"content":7090,"config":7096,"_id":7098,"_type":14,"title":7099,"_source":16,"_file":7100,"_stem":7101,"_extension":19},"/en-us/blog/secure-container-images-with-gitlab-and-grype",{"title":7085,"description":7086,"ogTitle":7085,"ogDescription":7086,"noIndex":6,"ogImage":7087,"ogUrl":7088,"ogSiteName":670,"ogType":671,"canonicalUrls":7088,"schema":7089},"How to secure your container images with GitLab and Grype","Learn how to start detecting vulnerabilities in your container images in just a few steps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671873/Blog/Hero%20Images/logos_header.jpg","https://about.gitlab.com/blog/secure-container-images-with-gitlab-and-grype","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to secure your container images with GitLab and Grype\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dan Luhring\"}],\n        \"datePublished\": \"2021-07-28\",\n      }",{"title":7085,"description":7086,"authors":7091,"heroImage":7087,"date":7093,"body":7094,"category":791,"tags":7095},[7092],"Dan Luhring","2021-07-28","> Support for the Grype scanner in the GitLab Container Scanning analyzer is being deprecated in GitLab 16.9 and will be removed in GitLab 17.0. Users are advised to use the default setting for `CS_ANALYZER_IMAGE`, which uses the Trivy scanner. 
Users who desire to continue using Grype can use the [Security Scanner Integration\ndocumentation](https://docs.gitlab.com/ee/development/integrations/secure.html) to create their own integration with GitLab.\n\n## The importance of container image security\n\nThanks to containers, what it means to \"ship software\" has changed dramatically. Engineering teams have shifted to produce container images and use these container images to deploy their software. Because of this change teams are now shipping significantly more software alongside their app – whether they realize it or not.\n\nBesides packaging an application, container images also include hundreds of binaries and libraries. These binaries and libraries are included in the container image produced by the team because the process of creating a container image requires teams to select a base image. A base image is a preexisting container image on which to \"base\" their own container image. In doing so, all software contained in the base image is inherited into the team's new image.\n\nThe shift to containers has a monumental impact on security. Now, anyone that deploys your team's container image could be deploying software with known vulnerabilities. Similarly, other teams that base their container images on your team's image will inherit any vulnerabilities present in your team's image. It's crucial that teams have a solution in place for detecting these vulnerabilities in the container images they're using.\n\n## Container Scanning with Grype\n\nFortunately, GitLab 14.0 offers a new way for teams to tackle this challenge: [Grype](https://github.com/anchore/grype). 
Anchore developed this state-of-the-art vulnerability scanner, which is now available as part of GitLab's Container Scanning feature.\n\nGrype is an advanced vulnerability scanner because it performs deep inspection of the software installed in a container image, and it uses this detailed information to produce better matches with vulnerability data.\n\nGrype is a particularly powerful tool for security-minded engineers to investigate and remediate findings because it gives comprehensive information in the vulnerability analysis, showing exactly how the tool determined vulnerability _X_ matched software package _Y_. Grype provides the transparency and detail necessary for any reported vulnerability to investigate why the image vulnerability is being reported. Some examples of what Grype can identify include: The exact image layer and file path where a package is installed, the source of the vulnerability data, available patches, and which parameters of the vulnerability record matched attributes of the package, among other things.\n\n\"We are excited to embed these very robust container scanning features of Grype within the GitLab DevOps platform,\" says [Sam White](/company/team/#sam.white), senior product manager of Protect at GitLab. 
\"Our built-in security enables DevOps velocity with confidence and these added features brings even greater security for cloud native applications.\"\n\n## Get started with Grype and GitLab\n\nFollow these steps to get set up GitLab's integration with Grype.\n\n### What you'll need:\n\n- [GitLab Ultimate](/pricing/ultimate/)\n- Access to an image in a container registry (such as the container registry in your GitLab project)\n- Ensure your CI/CD pipeline meets all of the [requirements](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html#requirements) for Container Scanning.\n\n### How to start scanning with Grype\n\nTo get started, just add the following snippet to your project's `.gitlab-ci.yml` file:\n\n```yaml\ninclude:\n  - template: Security/Container-Scanning.gitlab-ci.yml\n\ncontainer_scanning:\n  variables:\n    CS_ANALYZER_IMAGE: registry.gitlab.com/security-products/container-scanning/grype:4\n```\n\nBy default, the Container Scanning analyzer makes some assumptions about your target container image's URL and tag. You can have the scanner analyze any container image you want — you just need to specify [additional variables](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html#available-cicd-variables) in the \"container_scanning\" section of your `.gitlab-ci.yml` file. This set of variables also lets you configure registry credentials, custom CA certificates, whether to validate certificates, etc.\n\n## Viewing vulnerability analysis results\n\nOnce your first Container Scanning job completes, you can see what vulnerabilities have been reported. 
Just go to the \"Security & Compliance\" left-side menu and select \"Vulnerability Report\".\n\n![GitLab Security and Compliance Menu](https://about.gitlab.com/images/blogimages/anchore_blog_images/gitlab-security-menu.jpg){: .shadow}\nNavigate to \"Vulnerability report\" under the \"Security and Compliance\" menu.\n{: .note.text-center}\n\nFor example, here's what your vulnerability report could look like:\n\n![Sample vulnerability report](https://about.gitlab.com/images/blogimages/anchore_blog_images/gitlab-vulnerability-report.jpg){: .shadow}\nSee a sample Vulnerability Report\n{: .note.text-center}\n\nYou'll notice that the Vulnerability Report page gives you an immediate sense of the severities of the vulnerabilities.Even if there is a large number of vulnerabilities, you can quickly filter the list and dive deeper into any single vulnerability.\n\n## Final thoughts\n\nAdding Container Scanning with Grype to your GitLab pipeline is a straightforward process. With just a small snippet of YAML and some optional configuration, you can add tremendous visibility into the security of your team's container images.\n\nRead on to learn more about the [Container Scanning feature with GitLab](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html).\n\nLastly, make sure to check out the [Grype project](https://github.com/anchore/grype). We have an active open source community and make improvements all the time. 
If you have any questions or feature requests, don't hesitate to [open an issue](https://github.com/anchore/grype/issues/new/choose) or join our [community Slack](https://anchore.com/slack).",[9,875,9],{"slug":7097,"featured":6,"template":686},"secure-container-images-with-gitlab-and-grype","content:en-us:blog:secure-container-images-with-gitlab-and-grype.yml","Secure Container Images With Gitlab And Grype","en-us/blog/secure-container-images-with-gitlab-and-grype.yml","en-us/blog/secure-container-images-with-gitlab-and-grype",{"_path":7103,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7104,"content":7110,"config":7115,"_id":7117,"_type":14,"title":7118,"_source":16,"_file":7119,"_stem":7120,"_extension":19},"/en-us/blog/secure-containers-devops",{"title":7105,"description":7106,"ogTitle":7105,"ogDescription":7106,"noIndex":6,"ogImage":7107,"ogUrl":7108,"ogSiteName":670,"ogType":671,"canonicalUrls":7108,"schema":7109},"A shift left strategy for the cloud","Protect your software in the cloud by bringing vulnerability testing closer to remediation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670146/Blog/Hero%20Images/containers-for-five-things-kubernetes-blog-post.jpg","https://about.gitlab.com/blog/secure-containers-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A shift left strategy for the cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"},{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-05-03\",\n      }",{"title":7105,"description":7106,"authors":7111,"heroImage":7107,"date":7112,"body":7113,"category":679,"tags":7114},[1921,1016],"2019-05-03","\n\nBusinesses continually adopt new technologies to become more efficient and\neffective. This move toward efficiency in IT has brought a “shift left” to\n[application security](/topics/devsecops/) testing. 
Methodologies like DevOps and Agile work with iterative\nand [MVP](https://www.agilealliance.org/glossary/mvp/) states, meaning that apps are constantly updating and constantly need\ntesting and retesting – sometimes daily or multiple times per day.\n\n[Serverless](/topics/serverless/), cloud native, containers, and Kubernetes are changing how apps are\ndeployed and managed. This has expanded the attack surface in the form of new\nlayers of complexity and more settings and updates to manage, which also means\nmore room for manual error. In a container, this includes the image, registry,\nand east-west traffic, while in Kubernetes, this includes access and\nauthentication, runtime resources, and network policies. Traffic between apps\nin a container does not cross perimeter network security, but should still be\nmonitored for malicious traffic between apps and the resources they use.\n\n## Your cloud-based ecosystem doesn’t provide comprehensive security\n\nCloud providers, orchestrators, and other partners don’t provide a full\nspectrum of security capabilities out of the box – even with their help, your\nteam must create and maintain their own security policies and continuously\nmonitor your ecosystem for any unusual or malicious activity. 
While network\nsegmentation and perimeter security for your guest VMs or containers might be\navailable, your engineer will typically need to configure that.\n\nThe figure below outlines the responsibilities of cloud providers, security\nvendors, and end-users, across apps, hosts, networks, and foundation services.\nThe responsibilities in purple and orange are _primarily_ the responsibility of\nthe cloud provider and security vendors, but our engineers tell us that they\nare involved in every cell of this chart in some way.\n\n![Security responsibilities in your cloud ecosystem](https://about.gitlab.com/images/blogimages/container-security-responsibilities.png){: .shadow.medium.center}\n\n## Treat security as a critical outcome, not a department\n\nSecurity should be top of mind for everyone in the business, not just your\nsecurity team. While the complexity of your infrastructure builds, new tools\nand capabilities give opportunity for everyone to contribute to the security\neffort. Here are a few areas of change that will help you rally the masses in\ndefense of your business:\n- Cloud providers are beginning to offer more security capabilities.\n- System updates – and staying current with your patches – could very much save the day.\n- Automating your processes could make or break the business. While guidelines\nfor humans are necessary, you need automation to abstract the complexity of\nyour infrastructure. Soon, automated capabilities to translate plain-language\npolicies into the growing multitude of settings will make their way into the\nmarket.\n\n### Take a Zero Trust approach to your applications\n\nThe foundational idea of [Zero Trust](/blog/evolution-of-zero-trust/) is simple: Trust nothing and always assume\nthe bad guys are trying to get in. It’s time to take your security beyond the\ntraditional network-perimeter approach and extend Zero Trust from data,\nnetwork, and endpoints to your application infrastructure. 
It also wouldn’t\nhurt to protect the software development lifecycle (SDLC) to ensure the integrity of your software is not\ncompromised, given all of the automation in a typical DevOps toolchain.\n\n## Three key principles to secure next-generation IT\n\n### 1. Enhance your security practices with DevSecOps\n\nAs you iterate on software, dovetail security into each iteration through [DevSecOps](/solutions/security-compliance/) – not simply\nto test security for the entire history of the app, but to test the impact of\neach change made in every update. Retrofitting your apps and software for\nsecure functionality will slow down your release cycle. Marrying the two\nwill save both time in the present, and heartache in the future when\nyour software is inevitably attacked. Unfortunately, traditional methods don’t\nfit the bill when it comes to DevOps; it’s too expensive and too robust to\nscan every piece of code manually. With a [shift left](/topics/ci-cd/shift-left-devops/) strategy, security scans can be automated into every\ncode commit – meaning you no longer need to choose between risk, cost, and\nagility.\n\n[Arm your developers to resolve vulnerabilities early in the SDLC, leaving your\nsecurity team free to focus on exceptions](/blog/speed-secure-software-delivery-devsecops/).\nWith GitLab, a [review app](https://docs.gitlab.com/ee/ci/review_apps/) is spun up at code commit – before the\nindividual developer’s code is merged to the master. The developer can see and\ntest the working application, with test results highlighting the impact of the\ncode change. 
[Dynamic application security testing](https://docs.gitlab.com/ee/user/application_security/dast/) (DAST)\ncan then scan the review app, and the developer can quickly iterate to resolve\nvulnerabilities reported in their pipeline report.\n\n![View dynamic application security testing within GitLab.](https://about.gitlab.com/images/blogimages/dast-example.png){: .shadow.medium.center}\n[Learn more about DAST in GitLab's product documentation.](https://docs.gitlab.com/ee/user/application_security/dast/)\n\n### 2. Secure horizontally before digging deeper\n\nWe often fall into the trap of going deep on a single aspect of security –\nleaving other obvious aspects completely exposed. For instance, you may\nuse a powerful scanner for your mission-critical apps but neglect to scan\nothers; or, you may choose to save resources by not scanning your third-party\ncode, with the assumption that its widespread use means it’s checked out.\n\nAvoid focusing so much on application security that you forget about container\nscanning, orchestrators, and access management.\n\n### 3. Simplicity and integration wins\n\nThe key is to bring security scanning to the development process by having a\ntool like GitLab that allows developers to stay within the same platform or\ninterface to both code and scan. Making the process easier increases the\nlikelihood that it’ll get done – and making the process automatic within the\ntool ensures that it will happen every time there is a code update.\n\nReady to deliver secure apps with every update? 
[Just commit.](/solutions/security-compliance/)\n{: .alert .alert-gitlab-purple .text-center}\n\nCover image by [Frank McKenna](https://unsplash.com/@frankiefoto) on [Unsplash](https://unsplash.com/photos/tjX_sniNzgQ?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[1041,9,1477,875],{"slug":7116,"featured":6,"template":686},"secure-containers-devops","content:en-us:blog:secure-containers-devops.yml","Secure Containers Devops","en-us/blog/secure-containers-devops.yml","en-us/blog/secure-containers-devops",{"_path":7122,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7123,"content":7128,"config":7133,"_id":7135,"_type":14,"title":7136,"_source":16,"_file":7137,"_stem":7138,"_extension":19},"/en-us/blog/secure-journey-continuous-delivery",{"title":7124,"description":7125,"ogTitle":7124,"ogDescription":7125,"noIndex":6,"ogImage":1031,"ogUrl":7126,"ogSiteName":670,"ogType":671,"canonicalUrls":7126,"schema":7127},"Securing the journey to continuous delivery","The UK Dept for Work and Pensions bring security best practices to the forefront of a massive transition to continuous delivery.","https://about.gitlab.com/blog/secure-journey-continuous-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Securing the journey to continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-10-30\",\n      }",{"title":7124,"description":7125,"authors":7129,"heroImage":1031,"date":7130,"body":7131,"category":679,"tags":7132},[1016],"2019-10-30","\n[Adam Moss](https://www.linkedin.com/in/adam-moss/?originalSubdomain=uk) is the\nHead of Engineering Strategy, Technical Leadership, DevOps, and SRE at the\nDepartment for Work and Pensions. 
At this year’s GitLab Commit in London, Adam\nspoke about how his organization transitioned from waterfall to Agile, and how\nthey built security into both their organization's infrastructure and culture.\n\nThe Department for Work and Pensions (DWP) is the United Kingdom’s largest\ngovernment department. It comprises 84,000 employees and serves 22 million\ncitizens, with systems containing approximately 55 million lines of code and\nseeing about 10,000 changes per year.\n\nIn other words, it’s a big deal.\n\nBut their infrastructure and operations were less than stellar. Adam and his\nteam wanted to offer 24/7 service availability, improve their user experience,\nand reduce operational costs. So, they went Agile.\n\n## Big change for big gains\n\nBefore the transformation, the DWP had outsourced services for 30 years. To get\nto [continuous delivery](/topics/continuous-delivery/), they brought everything in-house. In addition to massive\noperational change, this also required an enormous cultural shift within the\norganization. Insourcing meant taking responsibility for everything – they couldn't blame a third party should anything go wrong. Teams also had to take on an iterative mindset: Changing their standard maximum viable product into a minimum one.\n\nThen there was the question of tools, which also brought the question of\nsecurity: What tools would best enable developers, without leaving gaping holes\nin their systems?\n\n## Owning the risk\n\nAs a government organization, the DWP was used to managing risk – but they\nsuddenly found themselves without an outsourced partner to blame. Now that Adam’s\nteam was fully responsible for security efforts, they needed to become much\nmore risk averse. 
Taking ownership of security is also a big change for\ndevelopers, even for organizations not undergoing massive transformation.\n\n### The journey to DevOps security\n\n#### Considerations\n\nTo keep both processes and systems secure, the DWP took a multi-layered\napproach with people, devices, and code among the top aspects considered.\n\nDevelopers are often highly privileged users, which poses certain risks to your\nenvironment. While it’s necessary to protect both systems and people,\norganizations need to be clear about their security policies and intent in\norder to build and maintain employee trust. Adam puts it this way: Think about\ndisciplinary policies – if a piece of vulnerable code is released and causes a\nproblem, is it the individual’s fault? Or is it a fault of the processes you’ve\nput in place?\n\nAdam also emphasized that restrictions might not be the best answer: Developers\nwill find a way around, so it’s better to implement something that allows\nthem to achieve their objectives without creating any backdoor processes.\n\nThere was also the consideration of open source – while it provides great\nbenefits, there are challenges that must also be managed appropriately. Adam’s\nteam chose to implement continuous vulnerability monitoring (with [GitLab](/solutions/security-compliance/))\nto keep track of any risky dependencies that might spring a data leak. They\nalso chose to use GitLab as a central point of control and single source of\ntruth, increasing transparency for the organization.\n\n#### Lessons learned\n\nIn his presentation, Adam shared some valuable tips for a successful\ntransition to continuous delivery. Here are a few favorites:\n\n##### Automate, automate!\nAutomation will make things immensely easier – not just because of the time\nsaved, but also because of its repeatability and reduced risk for human error.\nFocus on the low-hanging fruit early on in the process. 
There will always be things you can’t\nautomate, so pick the easy battles first.\n\n##### Identify your pain point\nTake a look across your operations and organization. What is the biggest\nchallenge you can solve? Or, what change will bring a lot of value in the move\nto continuous delivery? Try to achieve ROI as soon as possible.\n\n##### Anticipate risks from an external POV\nAdam recommends threat modeling, and looking at security from the outside in.\nWhat might an adversary be thinking? Why and how might they attack? Some tools\nwill even generate possible situations that you’ve never considered.\n\n##### Continuous doesn’t always mean automatic\nWhile you may want to automate functions as much as possible, the catalyst can\nstill be human. Separation of duties can serve as a useful defense mechanism to\nensure that big changes won’t cause undue risk.\n\n## The journey doesn’t end with DevOps\n\nAdam concludes with some wisdom for the future: Always be thinking about how\nyou’re going to evolve your organization, and make sure your roadmap continues to change as well.\nHe suggests looking externally for options you might not have yet considered,\nlike the capabilities planned for [your favorite DevOps tools](/direction/#devops-stages).\n\nTo build some new ideas into your own roadmap, watch Adam’s talk from GitLab\nCommit London.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/c8zFXUkPb2c\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[875,9,855,977,267],{"slug":7134,"featured":6,"template":686},"secure-journey-continuous-delivery","content:en-us:blog:secure-journey-continuous-delivery.yml","Secure Journey Continuous 
Delivery","en-us/blog/secure-journey-continuous-delivery.yml","en-us/blog/secure-journey-continuous-delivery",{"_path":7140,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7141,"content":7147,"config":7152,"_id":7154,"_type":14,"title":7155,"_source":16,"_file":7156,"_stem":7157,"_extension":19},"/en-us/blog/secure-stage-for-appsec",{"title":7142,"description":7143,"ogTitle":7142,"ogDescription":7143,"noIndex":6,"ogImage":7144,"ogUrl":7145,"ogSiteName":670,"ogType":671,"canonicalUrls":7145,"schema":7146},"How GitLab's application security dashboard helps AppSec engineers","GitLab Security features help application security engineers collaborate more efficiently and better assess the security posture of the projects they oversee.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663482/Blog/Hero%20Images/ralph-kayden-4Cg5T03B_8s-unsplash.jpg","https://about.gitlab.com/blog/secure-stage-for-appsec","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab's application security dashboard helps AppSec engineers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2020-07-07\",\n      }",{"title":7142,"description":7143,"authors":7148,"heroImage":7144,"date":7149,"body":7150,"category":791,"tags":7151},[4451],"2020-07-07","\n[Application Security (AppSec)](/topics/devsecops/) engineers focus on enhancing an application's security, by\nfinding, resolving, and preventing vulnerabilities. But managing all these\nvulnerabilities across different teams and projects is not an easy process. 
Managing vulnerabilities\ncan be simplified by using the [GitLab Secure](/stages-devops-lifecycle/secure/)\nfeatures found in [GitLab Ultimate](/pricing/ultimate/).\n\nOne of the significant capabilities of GitLab Secure is the accurate, automated, and continuous assessment of the\nsecurity of your applications and services through a unified dashboard.\n\nIn this blog post, I will show four ways GitLab Secure makes life easier for the AppSec\nengineer.\n\n---\n\n## Finding vulnerabilities with security scans\n\nThe first capability that AppSec engineers will find useful is the robust security scanning capabilities in [GitLab Ultimate](/pricing/ultimate/).\n\nThese capabilities allow you to proactively identify vulnerabilities and weaknesses to minimize your security risk\nusing a variety of defense-in-depth techniques. The security scans include the following:\n\n* [Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)\n* [Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/)\n* [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/)\n* [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\n* [License Scanning](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html)\n\n![pipeline with security scans](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/pipeline.png)\nGitLab pipeline running security scans\n{: .note.text-center}\n\nSimply add a [template](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/Security)\nto your [.gitlab-ci.yml](https://docs.gitlab.com/ee/ci/yaml/#includetemplate) or by enable [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) to set-up the scans.\n\nWhen submitting a merge request (MR), the security scans will run and populate the MR with\ndata on the vulnerabilities detected 
and how to resolve them. This data allows AppSec engineers\nto begin risk analysis and remediation.\n\n## Managing vulnerabilities with the Security Dashboard\n\nThe second most useful capability for AppSec engineers is the [Security Dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/), which helps\nkeep projects organized and summarizes the relevant security details for an application, all in one place.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/t-3TSlChHy4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe Security Dashboard in [GitLab Ultimate](/pricing/ultimate/) provides a high-level overview of the status of all the vulnerabilities\ndetected in groups, projects, and pipelines.\n\n![security dashboard with group view](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/dashboard-group.png)\nSecurity Dashboard Group-Level view\n{: .note.text-center}\n\nBy using the Security Dashboard, an AppSec engineer can drill down into each\nvulnerability to obtain additional information, such as how to resolve the vulnerability,\nhow it was handled by the developer, and if a work ticket (or GitLab issue) has been opened\nfor remediation.\n\nThe Security Dashboard also shows which file the vulnerability was detected in. Each vulnerability\nis assigned a severity and a report type. 
By using this information an AppSec Engineer\ncan quickly identify which items is the most critical for the team to tackle first.\n\n![security dashboard with project view](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/dashboard-project.png)\nSecurity Dashboard project-level view\n{: .note.text-center}\n\nIt's important to note the Security Dashboard supports integrations with third-party scanners.\nFor example, if you are using [WhiteSource](https://www.whitesourcesoftware.com/gitlab/), the\nscans results can added to and managed in the Security Dashboard.\n\n## Auditing with the Security Dashboard\n\nA third capability GitLab Secure offers AppSec engineers is auditing. The engineer can use this capability to audit\na project or group based on the vulnerabilities revealed in various tests. By using the Security Dashboard,\nthe AppSec engineer can see which vulnerabilities have been dismissed, the developer who dismissed them, as\nwell as the reason why they were dismissed. This is helpful for several reasons:\n\n* Check to make sure the development team is practicing secure coding\n* Confirm there are no malicious actors dismissing issues\n* Keep track of the status of vulnerabilities which could not be immediately resolved\n\n![security dashboard vulnerability info](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/dashboard-info.png)\nVulnerability info displayed in Security Dashboard\n{: .note.text-center}\n\nAn AppSec engineer can also track and create [confidential issues](https://docs.gitlab.com/ee/user/project/issues/confidential_issues.html) from the\nSecurity Dashboard. A team can keep track of the status\nof a vulnerability in private, and make sure it is still on track to being resolved when using confidential issues. 
A\nconfidential branch can be created along with the issue, so that the development team\ncan work on a resolution without tipping off malicious actors.\n\n![security dashboard confidential issue creation](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/dashboard-issue-creation.png)\nConfidential issues created via Security Dashboard\n{: .note.text-center}\n\n## Managing software licenses\n\nThe final capability we recommend AppSec engineers use is our license management.\n\nTypically, developers will use a variety of open source dependencies instead of reinventing the wheel.\nThere is a problem though: using a dependency with a restrictive license can invalidate your application.\n\nAn AppSec engineer is able to add a policy to mark licenses as acceptable or unacceptable for a project and its dependencies.\nIf an unacceptable license is found, the MR can be blocked. The video below provides\nan overview:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/42f9LiP5J_4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nGitLab Secure capabilities enhance the effiency of AppSec engineers, ultimately\nleading to the production of more secure applications and a more security-empowered\ndevelopment team. 
Learn more at [DevSecOps](/solutions/security-compliance/) and\ncheckout the [GitLab Secure direction page](/direction/secure/) for more\ninformation on the upcoming features and integrations.\n\n### Level up your DevSecOps knowledge:\n\n  [GitLab's security tools and the HIPAA risk analysis](https://about.gitlab.com/blog/gitlab-security-tools-and-the-hipaa-risk-analysis/)\n  [A deep dive into the Security Analyst persona](https://about.gitlab.com/blog/a-deep-dive-into-the-security-analyst-persona/)\n  [Compliance made easy with GitLab](https://about.gitlab.com/blog/compliance-made-easy/)\n\nCover image by [Ralph Kayden](https://unsplash.com/@ralphkayden) on [Unsplash](https://unsplash.com/photos/4Cg5T03B_8s)\n{: .note}\n\n## Learn more about DevSecOps\n\n- [Efficient DevSecOps: 9 tips for shifting left](/blog/efficient-devsecops-nine-tips-shift-left/)\n- [Want better DevSecOps? Try cross-functional collaboration](/blog/achieve-devsecops-collaboration/)\n- [Compliance made easy with GitLab](/blog/compliance-made-easy/)\n\n\u003C%= partial \"includes/blog/blog-merch-banner\" %>\n",[875,9,683,855],{"slug":7153,"featured":6,"template":686},"secure-stage-for-appsec","content:en-us:blog:secure-stage-for-appsec.yml","Secure Stage For Appsec","en-us/blog/secure-stage-for-appsec.yml","en-us/blog/secure-stage-for-appsec",{"_path":7159,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7160,"content":7165,"config":7170,"_id":7172,"_type":14,"title":7173,"_source":16,"_file":7174,"_stem":7175,"_extension":19},"/en-us/blog/securing-the-software-supply-chain-through-automated-attestation",{"title":7161,"description":7162,"ogTitle":7161,"ogDescription":7162,"noIndex":6,"ogImage":2016,"ogUrl":7163,"ogSiteName":670,"ogType":671,"canonicalUrls":7163,"schema":7164},"Securing the software supply chain through automated attestation","Standards bodies want to know how orgs are protecting against software tampering. 
Learn how automating compliance attestation can help.","https://about.gitlab.com/blog/securing-the-software-supply-chain-through-automated-attestation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Securing the software supply chain through automated attestation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-08-10\",\n      }",{"title":7161,"description":7162,"authors":7166,"heroImage":2016,"date":7167,"body":7168,"category":875,"tags":7169},[1454],"2022-08-10","\nSecuring the software supply chain is not a one-and-done proposition. Instead, organizations, especially those in the public sector, must level up their protections as governing bodies add to their security frameworks. If you need proof of this, look no further than the sudden emergence of attestation requirements.\n\nAttestation is [an authenticated statement](https://slsa.dev/attestation-model) (metadata) about a software artifact or collection of software artifacts. Attestation is a key feature of [SLSA](https://slsa.dev/)(Supply chain Levels for Software Artifacts) Certification Level 2, which requires organizations to protect against software tampering and add minimal build integrity guarantees. 
The concept of attestation, along with presenting a software bill of materials ([SBOM](https://gitlab.com/groups/gitlab-org/-/epics/858)), is featured prominently in the [NIST Secure Software Development Framework](/blog/comply-with-nist-secure-supply-chain-framework-with-gitlab/) and ISACA’s [Certified Information Security Auditor training](https://www.isaca.org/credentialing/cisa).\n\n“In the past few months and in the wake of high-profile security breaches, the major governing bodies have been laser-focused on attestation and the ability to provide a verified artifact from your continuous integration (CI) pipelines that show you’ve completed all your security scans in a way that would be acceptable and compliant with the standards they set forth,” says [Joel Krooswyk](https://gitlab.com/jkrooswyk), senior manager of solutions architects at GitLab.\n\n“While the government is certainly leading on these requirements, the need for attestation applies to everyone,” says [Sam White](https://gitlab.com/sam.white), principal product manager at GitLab. \n\n## The demand for attestation automation\n\nOrganizations might have previously felt comfortable performing periodic self-audits for compliance attestation, but [the stakes are now too high](/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security/) and public sector agencies, as well as private sector organizations, must consider automating this critical task, according to Krooswyk.\n\n“Until now, attestation has been a manual undertaking, which has been burdensome, expensive, and error-prone,” he says. 
“The more automation we can apply to attestation, and the more consistency we can incorporate from standards requirements, the better off software supply chain security will be and the more confidence we will have in development collaboration.”\n\nGitLab [introduced automated compliance attestation](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#artifact-attestation) in Release 15.1. GitLab Runner can generate and produce attestation metadata for all build artifacts. To enable this feature, you must set the RUNNER_GENERATE_ARTIFACTS_METADATA environment variable to “true”. This variable can either be set globally or it can be set for individual jobs. The metadata is then rendered in a plain text .json file that’s stored with the artifact. \n\nLearn how to automatically generate GitLab SLSA Level 2 Build Artifact Attestation:\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n  \u003Ciframe src=\"https://www.youtube.com/embed/MlIdqrDgI8U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n## Building attestation into the development lifecycle\n\nSoftware development is a collaborative effort and organizations need to know that upstream dependencies have been built in a secure manner. “Not only do you need to know that the software has been developed without vulnerabilities, but that the machine that software was built on has not been compromised,” White says. 
“How can you know, without attestation, that the binary itself is authentic and that the risk has been minimized?” By automating attestation, organizations can help protect users of their software from code that has been injected with malware or build servers that have been overtaken.\n\n>Join us at [GitLab Commit 2022](/events/commit/) and connect with the ideas, technologies, and people that are driving DevOps and digital transformation.\n\n“If developers don’t have to worry about the setup or ongoing complexity of attestation, it will be a game-changer for the security industry, because you are validating right at the point of software development,” White says.\n\n## Next up: Integrated code signing and broader participation\n\nAs the public sector wades deeper into compliance, the next logical step is to introduce accountability through code signing. “Next, developers need to cryptographically sign both the build artifact and the attestation file,” White says. “This will add another layer of confidence in the build artifacts and the software supply chain overall.”\n\nAttestation also must become the norm upstream throughout the open source community. “Attestation is very much a network effect where the more people adopt it, the more effective it gets,” Krooswyk says. “Everyone needs to generate their own attestation at the point in time when they build their artifact.” \n\nKrooswyk adds that in addition to SBOM validation, he would like to see attestations expand to include all vulnerabilities that are known at the time a project is built. “We need a continuous ability to create a birth-to-death artifact history,” he says.\n\nAll users on a GitLab 15.1 or later release can get started with generating attestation for their build artifacts by setting `RUNNER_GENERATE_ARTIFACTS_METADATA: true` in their CI pipeline.  For a more comprehensive approach, users can take advantage of security approvals, code scanning, and compliance auditing by using GitLab Ultimate. 
To test out building a more overarching software supply chain security strategy, try GitLab Ultimate for free with a [30-day trial today](/free-trial/).\n",[9,875,682,184],{"slug":7171,"featured":6,"template":686},"securing-the-software-supply-chain-through-automated-attestation","content:en-us:blog:securing-the-software-supply-chain-through-automated-attestation.yml","Securing The Software Supply Chain Through Automated Attestation","en-us/blog/securing-the-software-supply-chain-through-automated-attestation.yml","en-us/blog/securing-the-software-supply-chain-through-automated-attestation",{"_path":7177,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7178,"content":7184,"config":7189,"_id":7191,"_type":14,"title":7192,"_source":16,"_file":7193,"_stem":7194,"_extension":19},"/en-us/blog/security-culture-devsecops",{"title":7179,"description":7180,"ogTitle":7179,"ogDescription":7180,"noIndex":6,"ogImage":7181,"ogUrl":7182,"ogSiteName":670,"ogType":671,"canonicalUrls":7182,"schema":7183},"DevSecOps basics: how to build a security culture in 6 steps","How to build a DevSecOps culture in your workplace. Get there faster by creating a strong security culture.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663608/Blog/Hero%20Images/security-culture-devsecops.jpg","https://about.gitlab.com/blog/security-culture-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps basics: how to build a security culture in 6 steps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2020-07-15\",\n      }",{"title":7179,"description":7180,"authors":7185,"heroImage":7181,"date":7186,"body":7187,"category":679,"tags":7188},[1016],"2020-07-15","\n_This is the fourth in our five-part series on [DevSecOps](/topics/devsecops/) basics. 
Part one offers nine tips to truly [shift left](/blog/efficient-devsecops-nine-tips-shift-left/). Part two outlines the steps needed to create [silo-free collaboration](/blog/achieve-devsecops-collaboration/). And part three looks at the importance of [automated security testing](/blog/devsecops-security-automation/)._\n\n## Are you responsible for security?\n\nEven if it’s not in your title or job description, the answer is yes. Every employee is responsible for the security of their work. Unfortunately, many organizations don’t make this clear and don’t enforce it as policy. As vulnerabilities pile up on the desks of security engineers, developers wonder what’s taking so long – how many times does code have to be fixed before it’s deemed secure? [DevSecOps](/solutions/security-compliance/) flips traditional security on its head, but needs a strong security culture for sustainable success.\n\n## What is security culture?\n\nA security culture means that everyone – from board members to interns – must care about security and take actions to maintain it. Security should be considered in every piece of work and at every decision. \n\nThis may seem counterintuitive and not the efficiency promised by [DevSecOps](https://about.gitlab.com/solutions/security-compliance/). But by embedding security into every employee’s actions, the security team’s workload is streamlined and the end product is more secure. This is what companies mean when they talk about shifting security left: Bringing security forward in the software development life cycle to improve planning, test more code, and build accountability among non-security team members. \n\n## How to make security culture your default state\n\nUnless you’ve included security in every employee’s onboarding, creating a widespread security culture mindset will be challenging. 
Employees will need to think differently, behave differently, and eventually turn those changes into habits so that security becomes a natural part of their day-to-day work. \n\n### Step 1: Culture change starts at the top\n\nIf your organization has left security to \"the team,\" moving to a security culture will require board members and executives to be very involved in this change. Once execs are on board, work with thought leaders across the company to develop a security awareness and training program. Set the tone by making security a company-wide initiative, letting everyone know that security is top priority regardless of job function or organization. \n\n### Step 2: Awareness, education, and mutual understanding\n\nGive employees training on how they should incorporate security practices into everything they do. Transparency is key to building trust, so it’s important that employees understand why security is necessary and how they can contribute to the overall goal. On the other side, educate security practitioners about the demands placed on the business and [DevOps practices](/topics/devops/). This will help them help you create policies that move security and development forward together. \n\n### Step 3: Appoint security champions in dev \n\nSome employees will adopt security enthusiastically. Recruit those people to champion awareness and adoption among their peers. It may be helpful to provide your security champions extra resources and educational opportunities to boost their knowledge and make them an accessible resource for those around them. \n\n### Step 4: Encourage cross-functional collaboration\n\nTeam members should feel comfortable reaching out across functions, asking questions, and sharing (non-sensitive) information. DevSecOps breaks down silos to create a more efficient process, but it also does this to improve communication and build camaraderie between teams. 
If security is made into a multi-team effort, employees will feel encouraged to jump on the secure work bandwagon. \n\n### Step 5: Give developers the tools they need\n\nSecurity behaviors will be more readily adopted if they fit seamlessly into the developer’s workflow. [Security as code](/blog/how-to-security-as-code/) plays a big role here: Developers can produce more secure work when policies, tests, and scans are integrated into the pipeline and code itself. Excessive tool-switching will negate the benefits of shifting left, so it’s best to maintain efficiency by keeping your tech stack as simple as possible.\n\n### Step 6: Automate when appropriate\n\nAutomation is crucial for scaling security and will make adoption even easier for non-security employees. Within the developer’s workflow, static [application security](/topics/devsecops/) tests can be run against every code commit. Those scans can automatically produce a work ticket or [populate a security dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/). \n\n## Culture change: Worth the challenge\n\nSecurity isn’t an option: It’s a requirement. Security culture will always be worth the effort. Making security a top priority for the people in your organization will fortify your tech defenses and help you innovate in ways that will (hopefully) withstand the ever-changing threat landscape. \n\n_How efficient are your DevSecOps practices? 
[Take our DevSecOps Maturity Assessment to find out.](https://about.gitlab.com/resources/devsecops-methodology-assessment/)_\n\n**Learn more about DevSecOps:**\n* [How to harden your GitLab instance](/blog/gitlab-instance-security-best-practices/)\n\n* [Why DevSecOps must start with automated security testing](/blog/devsecops-security-automation/)\n\n* [How to capitalize on GitLab Security tools with external CI](https://docs.gitlab.com/ee/integration/jenkins.html)\n\nCover image by [Lindsay Henwood](https://unsplash.com/@lindsayhenwood) on [Unsplash](https://unsplash.com/photos/7_kRuX1hSXM)\n{: .note}\n",[9,875,916],{"slug":7190,"featured":6,"template":686},"security-culture-devsecops","content:en-us:blog:security-culture-devsecops.yml","Security Culture Devsecops","en-us/blog/security-culture-devsecops.yml","en-us/blog/security-culture-devsecops",{"_path":7196,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7197,"content":7202,"config":7207,"_id":7209,"_type":14,"title":7210,"_source":16,"_file":7211,"_stem":7212,"_extension":19},"/en-us/blog/security-features-in-ultimate",{"title":7198,"description":7199,"ogTitle":7198,"ogDescription":7199,"noIndex":6,"ogImage":5226,"ogUrl":7200,"ogSiteName":670,"ogType":671,"canonicalUrls":7200,"schema":7201},"Tired of afterthought security? Take a fresh look at GitLab Ultimate","Security may not be the first thing that comes to mind when thinking of our DevOps platform, but we’re going to make the case it should be. Here’s a look at some of the too-often-overlooked security features in GitLab Ultimate.","https://about.gitlab.com/blog/security-features-in-ultimate","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tired of afterthought security? 
Take a fresh look at GitLab Ultimate\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2020-12-08\",\n      }",{"title":7198,"description":7199,"authors":7203,"heroImage":5226,"date":7204,"body":7205,"category":875,"tags":7206},[1921],"2020-12-08","At GitLab, we have worked hard to make [application security testing](/topics/devsecops/) a natural by-product of software development. We started with the developer, bringing scan results into their native workflow, then we added a dashboard for the security pro. We acquired Gemnasium and most recently [Peach Tech and Fuzzit](/blog/fuzz-testing/). We have a board goal to be a world-class security product and have allocated just under 25% of our R&D budget to these capabilities. \n\nWe know our SAST, dependency, container, and other scanners are great but we’d also bought into the idea that people choose to use our [DevOps platform](/solutions/devops-platform/) largely because of CI or SCM and our security is just an added bonus. \n\nBut it seems we are our own worst critic, especially on how we determine product maturity. Data points include:\n\n* The technology review site G2 shows [GitLab’s static application security testing (SAST) is top rated](https://www.g2.com/categories/static-application-security-testing-sast#grid). \n* As of Dec. 
4, 2020, GitLab has an Overall Rating of 4.6 out of 5 in the Application Security Testing market on [Gartner Peer Insights](https://www.gartner.com/reviews/market/application-security-testing/vendor/gitlab/product/gitlab), based on 32 reviews.\n  * _Gartner Peer Insights reviews constitute the subjective opinions of individual end users based on their own experiences and do not represent the views of Gartner or its affiliates._\n* Dev-Insider 2020 Platinum award for [best code and composition analysis](https://www.storage-insider.de/die-leser-haben-entschieden-die-gewinner-der-it-awards-2020-a-973498/).\n\nAnd customers are noticing too:\n\n* “GitLab Secure replaced Veracode, Checkmarx, and Fortify in my DevOps toolchain. GitLab scans faster, is more accurate, and doesn’t require my developers to learn new tools.” \t\n\n* “GitLab Secure enables us to ship faster. Our other scanner tools could take up to a day to finish scanning whereas GitLab scans finish in as little a few minutes.” \n\nHere’s a look at other built-in security features in Ultimate for self hosted and Gold for SaaS.\n\n## Vulnerability scans (no assembly required)\n\nIf there are two truths in security, it’s these: The more you scan, the less risk you will have, and it’s cheaper to find and fix vulnerabilities in development than later in the lifecycle. Developers need access to that data in their workflow. 
GitLab Ultimate/Gold offers comprehensive scanning, out of the box with no integration required: [dynamic](https://docs.gitlab.com/ee/user/application_security/dast/) and [static](https://docs.gitlab.com/ee/user/application_security/sast/) (now including mobile apps), [container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/), [dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), [API scanning](https://docs.gitlab.com/ee/user/application_security/dast/#api-scan), and [fuzz testing](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/), along with scanning for [secrets](https://docs.gitlab.com/ee/user/application_security/secret_detection/) and [license compliance](https://docs.gitlab.com/ee/user/compliance/license_compliance/). All of these scans are built into the workflow with results presented in the MR pipeline – meaning busy developers don’t have to go hunting for results. \n\nThe scans are also easy to apply for security pros. With one click, you can choose what to do via AutoDevOps, or add in third-party scanners via the `ci.yml`. Just start with a [CI job definition](https://docs.gitlab.com/ee/development/integrations/secure.html#job-definition). We’ve even added a handy UX so non-developers can modify the `ci.yml` without coding (add link). By using CI templates you can easily set and apply [security policies](https://docs.gitlab.com/ee/user/application_security/configuration/) for merge approvals and more. You can also [limit security scanning to running offline](https://docs.gitlab.com/ee/user/application_security/offline_deployments/) for highly sensitive environments. \n\n## Comprehensive dashboards\n\nWhile this developer-first perspective will reduce vulnerabilities, they can’t all be fixed on the spot. 
So our [security dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/) capability (included with GitLab Ultimate/Gold) helps security pros manage remaining vulnerabilities. It provides a single source of truth, eliminating translation and friction between development and security, and makes it simple for anyone to see the status of remediation work, who changed what, where and when, and even who approved merge requests across the entire software development lifecycle.\n\nAnd because we know compliance also plays a key role in security, we have a dedicated [compliance dashboard](https://docs.gitlab.com/ee/user/compliance/compliance_report/index.html) that gathers key data to ensure quick and accurate reporting. \n\n## Container monitoring\n\nDevOps teams taking advantage of the modularity of containers also need a way to keep all the moving parts safe. Gitlab Ultimate offers [container threat monitoring](https://docs.gitlab.com/ee/user/application_security/) in addition to container scanning.\n\n## Integrated fuzz testing\n\nThanks to our acquisition of Peach and FuzzIt, GitLab Ultimate now offers integrated [coverage-guided fuzzing and continuous fuzzing](/topics/devsecops/what-is-fuzz-testing/), adding new types of testing previously unavailable.\n\nCover image by [Zhen Hu](https://unsplash.com/@zhenhu2424) on [Unsplash](https://unsplash.com)\n{: .note}\n",[875,9,916,231],{"slug":7208,"featured":6,"template":686},"security-features-in-ultimate","content:en-us:blog:security-features-in-ultimate.yml","Security Features In 
Ultimate","en-us/blog/security-features-in-ultimate.yml","en-us/blog/security-features-in-ultimate",{"_path":7214,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7215,"content":7220,"config":7225,"_id":7227,"_type":14,"title":7228,"_source":16,"_file":7229,"_stem":7230,"_extension":19},"/en-us/blog/security-gitlab-15",{"title":7216,"description":7217,"ogTitle":7216,"ogDescription":7217,"noIndex":6,"ogImage":1449,"ogUrl":7218,"ogSiteName":670,"ogType":671,"canonicalUrls":7218,"schema":7219},"GitLab's Commitment to Enhanced AppSec in Modern DevOps","Security abounds in our latest DevOps platform release, GitLab 15.","https://about.gitlab.com/blog/security-gitlab-15","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's commitment to enhanced application security in the modern DevOps world\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2022-06-21\",\n      }",{"title":7221,"description":7217,"authors":7222,"heroImage":1449,"date":1455,"body":7223,"category":875,"tags":7224},"GitLab's commitment to enhanced application security in the modern DevOps world",[4451],"\nWith [GitLab 14](/gitlab-14/), we saw deep emphasis on modernizing our DevOps capabilities. 
This modernization enabled enhanced application security and strenghtened collaboration between developers and security professionals.\n\nWe saw enhancments such as:\n\n- global rule registry and customization for policy requriements with support for separation of duties\n- a newly developed browser-based Dynamic Application Security Testing (DAST) scanner used to test and secure modern APIs and Single Page\nApplications\n- more support for different languages using Semgrep\n- new vulnerability management capabilities to increase visibility\n\nWith the GitLab 15 release, we can see how our commitment to enhancing application security across the board is stronger than ever. In this blog post, I will provide details on how GitLab is commited to enhancing not only security, but efficiency.\n\n> Discover how GitLab 15 can help your team deliver secure software, while maintaining compliance and automating manual processes.\nSave the date for our GitLab 15 [launch event](https://page.gitlab.com/fifteen) on June 23rd!\n\n## GitLab 15 security features\n\nWe see that with every GitLab release, there are plenty of enhancements to our security tools.\nGitLab 15 is no exception! 
We can see a boatload 🚢 of security enhacements released in GitLab 15 below:\n\n- [Container Scanning available in all tiers](https://docs.gitlab.com/ee/user/application_security/container_scanning/#capabilities)\n- [Audit changes to group IP allowlist](https://docs.gitlab.com/ee/administration/audit_events.html#group-events)\n- [Revoke a personal access token without PAT ID](https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-request-header)\n- [Project-level Secure Files in open beta](https://docs.gitlab.com/ee/ci/secure_files/)\n- [Dependency scanning support for poetry.lock files](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#supported-languages-and-package-managers)\n- [Semgrep-based Static Application Security Testing (SAST) scanning available for early adoption](https://docs.gitlab.com/ee/user/application_security/sast/analyzers.html#transition-to-semgrep-based-scanning)\n- [Access and Verify actions for environments](https://docs.gitlab.com/ee/ci/yaml/index.html#environmentaction)\n- [Terraform CI/CD template authenticates to Terraform module registry](https://docs.gitlab.com/ee/user/infrastructure/iac/#integrate-your-project-with-terraform)\n- [GitLab advisory data included in container scanning results](https://docs.gitlab.com/ee/user/application_security/container_scanning/#vulnerabilities-database)\n- [New audit events for merge settings](https://docs.gitlab.com/ee/administration/audit_events.html#project-events)\n- [Users with the Reporter role can manage iterations and milestones](https://docs.gitlab.com/ee/user/permissions.html)\n- [Dependency path information](https://docs.gitlab.com/ee/user/application_security/dependency_list/#dependency-paths)\n- [Secure and Protect analyzer major version update](https://docs.gitlab.com/ee/user/application_security/)\n- [Static Analysis analyzer updates](https://docs.gitlab.com/ee/user/application_security/sast/analyzers)\n- [Approve deployments from the Environments 
detail page](https://docs.gitlab.com/ee/ci/environments/deployment_approvals.html)\n- [Scan result policies listed under MR approval settings](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html#security-approvals)\n\nThese features run across different stages of the software development lifecycle. I have created a video showing some of the coolest new security features\nin GitLab 15:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/BasGVNvOFGo\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n## Scanners moved to GitLab Free Tier\n\nA lot of our scanners were only part of GitLab Ultimate in the past. However, over time, certain scanners\nhave been moved over to GitLab Free Tier, enabling you to enhance the security of your application\nno matter what tier of GitLab you are using.\n\n| Scanner            | Introduced | Moved to Free |\n| ------------------ | ---------- | ------------- |\n| SAST               | 10.3       | 13.3          |\n| Container Scanning | 10.4       | 15.0          |\n| Secret Detection   | 11.9       | 13.3          |\n\nWithin the free tier, you are able to download the reports generated by the security scanners.\nThis allows developers to see what vulnerabilities were detected within their source code and\ncontainer images.\n\n![Report on vulnerabilities](https://about.gitlab.com/images/blogimages/modern-devops-security/results_1.png)\n\nHowever, there are benefits to upgrading to Ultimate, which are described below.\n\n## Benefits of upgrading to Ultimate\n\nSome organizations have multiple groups and projects they are working on, as well as a the security team,\nwhich manages all the detected vulnerabilities. While having security scan reports ready for download\nis useful, it is not exactly scalable across an organization. 
This is where Ultimate assists in enhancing\nDevSecOps efficiency.\n\n### Scanners\n\nWhile the GitLab Free Tier includes SAST, Secret Detection, and Container Scanning to find vulnerabilities\nin your source code, when you upgrade to Ultimate, you are provided with even more scanners. Here are some\nof the additional scanners provided in Ultimate:\n\n- [DAST](https://docs.gitlab.com/ee/user/application_security/dast/index.html)\n- [Operational Container Scanning](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html)\n- [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\n- [Infrastructure as Code Scanning](https://docs.gitlab.com/ee/user/application_security/iac_scanning/)\n- [Coverage-Guided Fuzzing](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/)\n- [Web-API Fuzzing](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/)\n\n### Developer Lifecycle\n\nIn Ultimate, there is enhanced functionality within the developer lifecycle. The merge request a developer creates will\ncontain a security widget which displays a summary of the new security scan results. New results are determined by\ncomparing the current findings against existing findings in the default branch.\n\n![Ultimate security widget](https://about.gitlab.com/images/blogimages/modern-devops-security/developer_1.png)\n\nThe results contain not only detailed information on the vulnerability and how it affects the system, but also\nsolutions to mitigating or resolving the issue. 
These vulnerabilities are also actionable, meaning that a comment\ncan be added in order to notify the security team, so they may review – enhancing developer and appsec collaboration.\nA confidential issue can also be created so that developers and security professionals can work together towards a\nresolution safely and efficiently.\n\n![Confidential issue](https://about.gitlab.com/images/blogimages/modern-devops-security/developer_2.png)\n\nWhile these features were avaliable in Ultimate on older versions of GitLab, within release 14 this feature was heightened\nto include developer training within the vulnerability, helping to educate developers and make them more security-aware. GitLab 15\nwill provide even more enhancements to the developer lifecycle.\n\n![Ultimate enhancements](https://about.gitlab.com/images/blogimages/modern-devops-security/developer_3.png)\n\n### Security team lifecycle\n\nThere are also several features which greatly benefit members of a security team.\n\nThe security team is able to effectively manage and triage vulnerabilities using the [Vulnerability Reports](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/).\n\n![Vulnerability reports](https://about.gitlab.com/images/blogimages/modern-devops-security/appsec_1.png)\n\nThe [security dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/) allows the security team to assess the security posture\nof a project or group of projects. 
This is helpful to see how many vulnerabilities were introduced/resolved over time, as well as which projects require more\nattention than others\n\n![security dashboard](https://about.gitlab.com/images/blogimages/modern-devops-security/appsec_2.png)\n\nSeparation of duties can be enforced using [Compliance Frameworks](https://docs.gitlab.com/ee/user/project/settings/#compliance-frameworks)\nand [Security Policies](https://docs.gitlab.com/ee/user/application_security/policies/) assuring code requires approval before making it to production.\n\n![Separation of duties](https://about.gitlab.com/images/blogimages/modern-devops-security/appsec_3.png)\n\nThese are just some of the features GitLab has to offer in terms of security. For even more features, please see\nthe [GitLab application security](https://docs.gitlab.com/ee/user/application_security/) documentation.\n\n---\n\nThanks for reading! To find out more about the newest security features in GitLab 15, check out\nthe [release post](/releases/2022/05/22/gitlab-15-0-released/). 
For upcoming\nversion features, see the [Upcoming Releases](/upcoming-releases/) page.\n\nIt is also helpful to check out our [Secure](/direction/secure/) and [Protect](/direction/govern/) roadmaps to get an idea of the direction we\nare headed!\n",[875,9],{"slug":7226,"featured":6,"template":686},"security-gitlab-15","content:en-us:blog:security-gitlab-15.yml","Security Gitlab 15","en-us/blog/security-gitlab-15.yml","en-us/blog/security-gitlab-15",{"_path":7232,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7233,"content":7238,"config":7244,"_id":7246,"_type":14,"title":7247,"_source":16,"_file":7248,"_stem":7249,"_extension":19},"/en-us/blog/security-testing-principles-developer",{"title":7234,"description":7235,"ogTitle":7234,"ogDescription":7235,"noIndex":6,"ogImage":2488,"ogUrl":7236,"ogSiteName":670,"ogType":671,"canonicalUrls":7236,"schema":7237},"5 Security testing principles every developer should know","Developers are looking for guidance and standard practices as they take on more security testing responsibilities.","https://about.gitlab.com/blog/security-testing-principles-developer","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Security testing principles every developer should know\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"},{\"@type\":\"Person\",\"name\":\"Seth Berger\"}],\n        \"datePublished\": \"2019-09-16\",\n      }",{"title":7234,"description":7235,"authors":7239,"heroImage":2488,"date":7241,"body":7242,"category":679,"tags":7243},[1016,7240],"Seth Berger","2019-09-16","\n## Principles of secure testing and how to do it\n\nSecurity testing is no longer under sole ownership of the security team. New\ntools have made it easy to bring testing into the DevOps model, where developers\ncan review and test code as they build out the app or software. 
However,\ndevelopers aren’t always on board with conducting security tests themselves:\nNearly half of security professionals surveyed in [GitLab’s 2019 Global\nDeveloper Report](/developer-survey/) (49%) said they\nstruggle to get developers to make remediation of vulnerabilities a priority.\nLike the developers and operations professionals, 50% of security teams\nsurveyed also believe testing is what most slows down development. AppDev leaders can improve their teams' security practices by building team buy-in and adopting tools that make it easy for developers to follow the five principles outlined below.\n\n## Security should be with you every step of the way\n\nWe’ve reached a day and age where security can’t be an afterthought or bolt-on\nactivity: Everyone is responsible for ensuring their work does not put the\ncustomer or business at risk. Security isn't just a box to check either, it’s a\nway of operating that should stay with you through development, deployment, and\nupdates. Developers can adopt security as their own by following these five\nprinciples:\n\n### 1. Evangelize your security efforts\n\nWhile developers are taking more responsibility for security, an overall\nquestion of ownership still remains. Everyone should be responsible for\nsecurity, but all too often that “everyone” comes to mean “no one”. Dev team\nleaders should advocate for security and the proper time to address it. Without\nthe proper advocacy, resources won't be allocated and security can become high-\nrisk technical debt. By shifting security left in the software development\nprocess, developers can allocate resources early on while they are still\nplentiful. Make it easy for your developers to adopt strong security practices\nby creating team-wide guidelines, educating developers on best practices and\ncommon challenges, and standardizing your expectations through both team and\nindividual security metrics.\n\n### 2. 
Test early, test often\n\nDevSecOps is an important next step in your DevOps initiatives. [Security teams\nare three times as likely to discover bugs before code is merged](/developer-survey/), so test as you\ncode and begin fixing vulnerabilities as early as possible. By incorporating tools\nthat help with dependency scanning, dynamic application security testing (DAST),\nand static application security testing (SAST), developers can get feedback as code is written and committed. These tools can give\ndevelopers information about the security of their code early in the development\nprocess, making it faster and cheaper to remediate compared to making fixes later on.\n[In a mature DevOps model, teams are 90% more likely to test between\n91% and 100% of all code than organizations with early-stage DevOps](/developer-survey/).\nTesting should continue throughout the DevOps lifecycle, so that developers are\nable to change code before it’s integrated into the broader codebase. Frequent testing will\nultimately take less time and fewer resources, speed time to deployment, and\nreduce friction between IT and security.\n\n### 3. Always verify your changes with a second set of eyes\n\nWriting and updating code should always be a joint effort. A second set of eyes\nwill spot potential issues that the author wasn’t able to see, and will reduce\nthe risk of deploying code that still has vulnerabilities. A [randomized buddy\nsystem](/blog/play-reviewer-roulette/) comes in handy here, to ensure that code reviews aren’t being handed to\nthe same person time after time, and allows different team members to\nlook at work that may be different from their own activities. Don't be afraid to\nuse tooling to help implement code reveiws and approvals. 
Configure your tooling\nto [require approvers within your merge request process](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/).\nCulture is an important element when it comes to code review: Your team of\ndevelopers must care about the integrity of the product or project as a whole,\nand not just the speed or quality of their own code development.\n\n### 4. Keep a master log of every code deployment, dependency, and update\n\nTransparency is key to ensuring quality code. Creating a complete history of your code\nwill be helpful in reviews and incident response, and allows the security team or\ndevelopers to identify exactly when and where a vulnerability occurred. Teams should\nalso minimize any manual build or deployment processes to ensure that their\napplications have full traceability and logging.\nWith a majority of application code being open source, dependencies have become a\nmajor attack surface. What’s more, bugs in open source code generally fly under\nthe radar (no pun intended), undetected by developers until it’s too late.\nUnderstanding, patching, and updating dependencies is critical, as catastrophic\nbreaches ([such as WannaCry](https://en.wikipedia.org/wiki/WannaCry_ransomware_attack))\ncan and have occurred due to a missed update or patch. Security scans using updated vulnerability databases should be run on a regular\nbasis to maintain app security – even on code that has previously been scanned.\n\n### 5. Diversify your security portfolio\n\nEmploy many different types of testing to cover your bases. A single type of\ntesting, like SAST, DAST, pre-release scanning, pen testing, or dependency\nscanning is helpful, but won’t provide a complete view of your application\nenvironment. 
[Forrester's annual application security report](https://www.forrester.com/report/The+State+Of+Application+Security+2019/-/E-RES145135)\nnotes that security teams are adjusting their practices to help developers respond to\nvulnerabilities at the speed of development. Some teams now conduct software\ncomposition analysis ahead of production, and have moved static application\nsecurity testing (SAST) to early development ([something your team can achieve\nwith GitLab](https://docs.gitlab.com/ee/user/application_security/sast/)).\nOthers are using bug bounty programs to crowdsource vulnerability discovery,\nwhich is particularly helpful for uncovering problems that don’t fall into known\nsecurity flaw patterns.\n\n## Work to achieve a DevSecOps model\n\nNearly 70% of developers are expected to write secure code, but only 25% of\ndevelopers believe they have “good” security practices. DevOps is a great place\nto start: It’s [clear from our data](/developer-survey/)\nthat a more mature DevOps model encourages innovation and collaboration, and\nenables teams to test more code faster. 
As more teams continue to shift their\nsecurity practices left, DevSecOps will become an advantageous reality for\ndevelopers and security professionals alike.\n\nCover photo by [Patrick Tomasso](https://unsplash.com/@impatrickt?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,875],{"slug":7245,"featured":6,"template":686},"security-testing-principles-developer","content:en-us:blog:security-testing-principles-developer.yml","Security Testing Principles Developer","en-us/blog/security-testing-principles-developer.yml","en-us/blog/security-testing-principles-developer",{"_path":7251,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7252,"content":7258,"config":7265,"_id":7267,"_type":14,"title":7268,"_source":16,"_file":7269,"_stem":7270,"_extension":19},"/en-us/blog/sentry-integration-blog-post",{"title":7253,"description":7254,"ogTitle":7253,"ogDescription":7254,"noIndex":6,"ogImage":7255,"ogUrl":7256,"ogSiteName":670,"ogType":671,"canonicalUrls":7256,"schema":7257},"Sentry's GitLab integration streamlines error remediation","Your code has bugs, my code has bugs, everyone’s code has bugs (probably). 
Let’s fix that.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679964/Blog/Hero%20Images/sentry-io-blog.jpg","https://about.gitlab.com/blog/sentry-integration-blog-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Streamline and shorten error remediation with Sentry’s new GitLab integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eva Sasson\"}],\n        \"datePublished\": \"2019-01-25\"\n      }",{"title":7259,"description":7254,"authors":7260,"heroImage":7255,"date":7262,"body":7263,"category":1318,"tags":7264},"Streamline and shorten error remediation with Sentry’s new GitLab integration",[7261],"Eva Sasson","2019-01-25","\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KUHk1uuXWhA?rel=0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nSentry is open source error tracking that gives visibility across your entire stack and provides the details you need to fix bugs, ASAP. 
Because the only thing better than visibility and details is more visibility and details, Sentry improved their [GitLab integration](https://docs.sentry.io/workflow/integrations/global-integrations/gitlab/?utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) by adding [release](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) and [commit](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM#link-repository) tracking as well as [suspect commits](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM#after-linking-a-repository).\n\n### Streamline your workflow with issue management and creation\n\nWhen you receive an alert about an error, the last thing you want to do is to jump around 20 different tools trying to find out exactly what happened and where. Developers with both Sentry and GitLab in their application lifecycle benefit from issue management and issue creation to their GitLab accounts directly in the Sentry UI, alleviating some of the hassle of back-and-forth tool toggling.\n\n![GitLab account in Sentry](https://about.gitlab.com/images/blogimages/sentry/gitlab-sentry-integration.png){: .shadow.large.center}\n\nOf course, less tool jumping results in a more streamlined triaging process and shortened time to issue resolution – something that benefits the whole team.\n\n![Creating GitLab issue](https://about.gitlab.com/images/blogimages/sentry/create-gitlab-issue.png){: .shadow.medium.center}\n\nHave a GitLab issue that wasn’t created in Sentry? No problem. 
Existing issues are also easily linked.\n\n![Import GitLab issue](https://about.gitlab.com/images/blogimages/sentry/import-gitlab-issue.png){: .shadow.medium.center}\n\n### Find and fix bugs faster with release and commit tracking\n\nWhy stop at streamlining the triaging process, when we can also make issue resolution more efficient? Sentry’s GitLab integration now utilizes GitLab commits to find and fix bugs faster.\n\nWith the newly added release and commit tracking, an enhanced release overview page uncovers new and resolved issues, files changed, and authors. Developers can also resolve issues via commit messages or merge requests, see suggested assignees for issues, and receive detailed deploy emails.\n\nWant a big flashing arrow that points to an error’s root cause? Sentry’s suspect commits feature exposes the commit that likely introduced an error as well as the developer who wrote the broken code.\n\n![Suspect commits feature](https://about.gitlab.com/images/blogimages/sentry/suspect-commits-feature.png){: .shadow.medium.center}\n\nKeep in mind that this feature is available for Sentry users on “Teams” plans and above.\n{: .note}\n\nCheck out [Sentry’s GitLab integration documentation](https://docs.sentry.io/workflow/integrations/global-integrations/gitlab/?utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) to get started.\n\n### What’s next?\n\nAgain, why stop there, when we can do even more? GitLab is currently working to bring Sentry into the GitLab interface. Soon, GitLab and Sentry users will see their Sentry errors listed in their GitLab projects. 
Read the documentation on [the integration here](https://docs.gitlab.com/ee/operations/error_tracking.html).\n\n### About the guest author\n\nEva Sasson is a Product Marketer at [Sentry.io](https://sentry.io/welcome/), an open source error-tracking tool that gives developers the contextual information they need to resolve issues quickly, and integrates with the other development tools across the stack.\n",[109,749,9,231,682,875,1158,1829,683],{"slug":7266,"featured":6,"template":686},"sentry-integration-blog-post","content:en-us:blog:sentry-integration-blog-post.yml","Sentry Integration Blog Post","en-us/blog/sentry-integration-blog-post.yml","en-us/blog/sentry-integration-blog-post",{"_path":7272,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7273,"content":7278,"config":7284,"_id":7286,"_type":14,"title":7287,"_source":16,"_file":7288,"_stem":7289,"_extension":19},"/en-us/blog/set-up-infrastructure-for-cloud-development-environments",{"title":7274,"description":7275,"ogTitle":7274,"ogDescription":7275,"noIndex":6,"ogImage":5015,"ogUrl":7276,"ogSiteName":670,"ogType":671,"canonicalUrls":7276,"schema":7277},"Cloud infrastructure for on-demand development in GitLab","Learn how to set up the requirements, manage Kubernetes clusters in different clouds, create the first workspaces and custom images, and get tips and troubleshooting.","https://about.gitlab.com/blog/set-up-infrastructure-for-cloud-development-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Set up your infrastructure for on-demand, cloud-based development environments in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-07-13\"\n      }",{"title":7279,"description":7275,"authors":7280,"heroImage":5015,"date":7281,"body":7282,"category":791,"tags":7283},"Set up your infrastructure for on-demand, cloud-based development 
environments in GitLab",[2473],"2023-07-13","Cloud-based development environments enable a better developer onboarding experience and help make teams more efficient. In this tutorial, you'll learn how to ready your infrastructure for on-demand, cloud-based development environments. You'll also learn how to set up the requirements, manage Kubernetes clusters in different clouds, create your first workspaces and custom images, and get tips for troubleshooting.\n\nThe GitLab agent for Kubernetes, an OAuth GitLab app, and a proxy pod deployment make the setup reproducible in different Kubernetes cluster environments and follow cloud-native best practices. Bringing your own infrastructure allows platform teams to store the workspace data securely, control resource usage, harden security, and troubleshoot the deployments in known ways.\n\nThis blog post is a long read so feel free to navigate to the sections of interest. However, if you want to follow the tutorial step by step, the sections depend on one another for the parts pertaining to infrastructure setup.\n\n- [Development environments on your infrastructure](#development-environments-on-your-infrastructure)\n- [Requirements](#requirements)\n    - [Workspaces domain](#workspaces-domain)\n    - [TLS certificates](#tls-certificates)\n- [GitLab OAuth application](#gitlab-oauth-application)\n- [Kubernetes cluster setup](#kubernetes-cluster-setup)\n    - [Set up infrastructure with Google Kubernetes Engine (GKE)](#set-up-infrastructure-with-google-kubernetes-engine-gke)\n    - [Set up infrastructure with Amazon Elastic Kubernetes Service (EKS)](#set-up-infrastructure-with-amazon-elastic-kubernetes-service-eks)\n    - [Set up infrastructure with Azure Managed Kubernetes Service (AKS)](#set-up-infrastructure-with-azure-managed-kubernetes-service-aks)\n    - [Set up infrastructure with Civo Cloud Kubernetes](#set-up-infrastructure-with-civo-cloud-kubernetes)\n    - [Set up infrastructure with self-managed 
Kubernetes](#set-up-infrastructure-with-self-managed-kubernetes)\n- [Workspaces proxy installation into Kubernetes](#workspaces-proxy-installation-into-kubernetes)\n- [Agent for Kubernetes installation](#agent-for-kubernetes-installation)\n- [Workspaces creation](#workspaces-creation)\n    - [Create the first workspaces](#create-the-first-workspaces)\n    - [Custom workspace container images](#custom-workspace-container-images)\n- [Tips](#tips)\n    - [Certificate management](#certificate-management)\n    - [Troubleshooting](#troubleshooting)\n    - [Contribute](#contribute)\n- [Share your feedback](#share-your-feedback)\n\n## Development environments on your infrastructure\nSecure, on-demand, cloud-based development workspaces are [available in beta for public projects](/blog/introducing-workspaces-beta/) for Premium and Ultimate customers. The first iteration allows you to bring your own infrastructure as a Kubernetes cluster. GitLab already deeply integrates with Kubernetes through the GitLab agent for Kubernetes, setting the foundation for configuration and cluster management.\n\nUsers can define and use a development environment template in a project. Workspaces in GitLab support the [devfile specification](https://docs.gitlab.com/ee/user/workspace/#devfile) as `.devfile.yaml` in the project repository root. The devfile attributes allow configuring of the workspace. For example, the `image` attribute specifies the container image to run and create the workspace in isolated container environments. The containers require a cluster orchestrator, such as Kubernetes, that manages resource usage and ensures data security and safety. Workspaces also need authorization: Project source code may contain sensitive intellectual property or otherwise confidential data in specific environments. The setup requires a GitLab OAuth application as the foundation here.\n\nThe following steps provide an in-depth setup guide for different cloud providers. 
If you prefer to set up your own environment, please follow the [documentation for workspace prerequisites](https://docs.gitlab.com/ee/user/workspace/#prerequisites). In general, we will practice the following steps:\n0. (Optional) Register a workspaces domain, and create TLS certificates.\n1. Create a Kubernetes cluster and configure access and requirements.\n2. Install an Ingress controller.\n3. Set up the workspaces proxy with the domain, TLS certificates, and OAuth app.\n4. Create a new GitLab group with a GitLab agent project. The agent can be used for all projects in that group.\n5. Install the GitLab agent for Kubernetes using the UI provided Helm chart command.\n6. Create an example project with a devfile configuration for workspaces.\n\nSome commands do not use the terminal indicator (`$` or `#`) to support easier copy-paste of command blocks into terminals.\n\n## Requirements\nThe steps in this blog post require the following CLI tools:\n1. `kubectl` and `helm` for Kubernetes\n2. `certbot` for Let's Encrypt\n3. git, curl, dig, openssl, and sslscan for troubleshooting\n\n### Workspaces domain\nWorkspaces require a domain with DNS entries. Cloud providers, for example, Google Cloud, also provide domain services which integrate more easily. You can also register and manage domains with your preferred provider.\n\nThe required DNS entries will be:\n- Wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`) A/AAAA records pointing to the external Kubernetes external IP: `kubectl get services -A`\n- (Optional, with Let's Encrypt) ACME DNS challenge entries as TXT records\n\nAfter acquiring a domain, wait until the Kubernetes setup is ready and extract the A/AAAA records for the DNS settings. 
The following example shows how `remote-dev.dev` is configured in the Google Cloud DNS service.\n\n![GitLab remote development workspaces, example DNS configuration for remote-dev.dev](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_google_cloud_dns_remote-dev.dev-entries.png){: .shadow}\n\nExport shell variables that define the workspaces domains, and the email contact. These variables will be used in all setup steps below.\n\n```\nexport EMAIL=\"user@company.com\"\nexport GITLAB_WORKSPACES_PROXY_DOMAIN=\"remote-dev.dev\"\nexport GITLAB_WORKSPACES_WILDCARD_DOMAIN=\"*.remote-dev.dev\"\n```\n\n**Note:** This blog post will show the example domain `remote-dev.dev` for better understanding with a working example. The domain `remote-dev.dev` is maintained by the [Developer Evangelism team at GitLab](/handbook/marketing/developer-relations/developer-evangelism/projects/). There are no public demo environments available at the time of writing this blog post.\n\n### TLS certificates\nTLS certificates can be managed with different methods. To get started quickly, it is recommended to follow the [documentation steps](https://docs.gitlab.com/ee/user/workspace/#prerequisites) with Let's Encrypt and later consider production requirements with TLS certificates.\n\n```shell\ncertbot -d \"${GITLAB_WORKSPACES_PROXY_DOMAIN}\" \\\n  -m \"${EMAIL}\" \\\n  --config-dir ~/.certbot/config \\\n  --logs-dir ~/.certbot/logs \\\n  --work-dir ~/.certbot/work \\\n  --manual \\\n  --preferred-challenges dns certonly\n\n  certbot -d \"${GITLAB_WORKSPACES_WILDCARD_DOMAIN}\" \\\n  -m \"${EMAIL}\" \\\n  --config-dir ~/.certbot/config \\\n  --logs-dir ~/.certbot/logs \\\n  --work-dir ~/.certbot/work \\\n  --manual \\\n  --preferred-challenges dns certonly\n```\n\nThe Let's Encrypt CLI prompts you for the ACME DNS challenge. This requires setting TXT records for the challenge session immediately. 
Add the DNS records and specify a low TTL (time-to-live) of 300 seconds to update the records during the first steps.\n\n```\n_acme-challenge TXT \u003Cstringfromletsencryptacmechallenge>\n```\n\nYou can verify the DNS records using the `dig` CLI command.\n\n```shell\n$ dig _acme-challenge.remote-dev.dev txt\n...\n;; ANSWER SECTION:\n_acme-challenge.remote-dev.dev.\t246 IN\tTXT\t\"TlGRM9JGdXHGVklPWgytflxWDF82Sv04nF--Wl9JFvg\"\n_acme-challenge.remote-dev.dev.\t246 IN\tTXT\t\"CqG_54w6I0heWF3wLMAmUAitPcUMs9qAU9b8QhBWFj8\"\n```\n\nOnce the Let's Encrypt routine is complete, note the TLS certificate location.\n\n```\nSuccessfully received certificate.\nCertificate is saved at: /Users/mfriedrich/.certbot/config/live/remote-dev.dev/fullchain.pem\nKey is saved at:         /Users/mfriedrich/.certbot/config/live/remote-dev.dev/privkey.pem\nThis certificate expires on 2023-08-15.\nThese files will be updated when the certificate renews.\n\nSuccessfully received certificate.\nCertificate is saved at: /Users/mfriedrich/.certbot/config/live/remote-dev.dev-0001/fullchain.pem\nKey is saved at:         /Users/mfriedrich/.certbot/config/live/remote-dev.dev-0001/privkey.pem\nThis certificate expires on 2023-08-15.\nThese files will be updated when the certificate renews.\n```\n\nExport the TLS certificate paths into environment variables for the following setup steps.\n\n```shell\nexport WORKSPACES_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/fullchain.pem\"\nexport WORKSPACES_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/privkey.pem\"\n\nexport WILDCARD_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/fullchain.pem\"\nexport WILDCARD_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/privkey.pem\"\n```\n\n**Note**: If you prefer to use your certificates, please copy the files into a safe location, and export the environment variables with the path 
details.\n\n## GitLab OAuth application\n_After preparing the requirements, continue with the components setup._\n\nCreate a [group-owned OAuth application](https://docs.gitlab.com/ee/integration/oauth_provider.html) for the remote development workspaces group. Creating a centrally managed app with a service account or group with limited access is recommended for production use.\n\nNavigate into the group `Settings > Applications` and specify the following values:\n\n1. Name: `Remote Development workspaces by \u003Cresponsible team> - \u003Cdomain>`. Add the responsible team that is trusted in your organization. For debugging, add the domain. There might be multiple authorization groups; this helps identify which workspace domain is used.\n2. Redirect URI: `https://\u003CGITLAB_WORKSPACES_PROXY_DOMAIN>/auth/callback`. Replace `GITLAB_WORKSPACES_PROXY_DOMAIN` with the domain string value.\n3. Set the scopes to `api, read_user, openid, profile`.\n\n![GitLab remote development workspaces, OAuth application in the group settings](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_oauth_app_create.png){: .shadow}\n\nStore the OAuth application details in your password vault, and export them as shell environment variables for the next setup steps.\n\nCreate a configuration secret for the proxy as a signing key (`SIGNING_KEY`), and store it in a safe place (for example, use a secrets vault like 1Password to create and store the key).\n\n```\nexport CLIENT_ID=\"XXXXXXXXX\" # Look into password vault and set\nexport CLIENT_SECRET=\"XXXXXXXXXX\" # Look into password vault and set\nexport REDIRECT_URI=\"https://${GITLAB_WORKSPACES_PROXY_DOMAIN}/auth/callback\"\n\nexport GITLAB_URL=\"https://gitlab.com\" # Replace with your self-managed GitLab instance URL if not using GitLab.com SaaS\nexport SIGNING_KEY=\"a_random_key_consisting_of_letters_numbers_and_special_chars\" # Look into password vault and 
set\n```\n\n## Kubernetes cluster setup\nThe following sections describe how to set up a Kubernetes cluster in different cloud and on-premises environments and install an [ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) for HTTP access. After completing the Kubernetes setup, you can continue with the workspaces proxy and agent setup steps.\n\n**Choose one method to create a Kubernetes cluster. Note: Use `amd64` as platform architecture [until multi-architecture support is available for running workspaces](https://gitlab.com/groups/gitlab-org/-/epics/10594).** Cloud environments with Arm support will not work yet, for example AWS EKS on Graviton EC2 instances.\n\nYou should have defined the following variables from the previous setup steps:\n\n```sh\nexport EMAIL=\"user@company.com\"\nexport GITLAB_WORKSPACES_PROXY_DOMAIN=\"remote-dev.dev\"\nexport GITLAB_WORKSPACES_WILDCARD_DOMAIN=\"*.remote-dev.dev\"\n\nexport WORKSPACES_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/fullchain.pem\"\nexport WORKSPACES_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/privkey.pem\"\n\nexport WILDCARD_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/fullchain.pem\"\nexport WILDCARD_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/privkey.pem\"\n\nexport CLIENT_ID=\"XXXXXXXXX\" # Look into password vault and set\nexport CLIENT_SECRET=\"XXXXXXXXXX\" # Look into password vault and set\nexport REDIRECT_URI=\"https://${GITLAB_WORKSPACES_PROXY_DOMAIN}/auth/callback\"\n\nexport GITLAB_URL=\"https://gitlab.com\" # Replace with your self-managed GitLab instance URL if not using GitLab.com SaaS\nexport SIGNING_KEY=\"XXXXXXXX\" # Look into password vault and set\n\n```\n\n### Set up infrastructure with Google Kubernetes Engine (GKE)\n\n[Install and configure the Google Cloud SDK and `gcloud` 
CLI](https://cloud.google.com/sdk/docs/install?hl=en), and install the `gke-gcloud-auth-plugin` plugin to authenticate against Google Cloud.\n\n```shell\nbrew install --cask google-cloud-sdk\n\ngcloud components install gke-gcloud-auth-plugin\n\ngcloud auth login\n```\n\nCreate a new GKE cluster using the `gcloud` command, or follow the steps in the Google Cloud Console.\n\n```shell\n\nexport GCLOUD_PROJECT=group-community\nexport GCLOUD_CLUSTER=de-remote-development-1\n\ngcloud config set project $GCLOUD_PROJECT\n\n# Create cluster (modify for your needs)\ngcloud container clusters create $GCLOUD_CLUSTER \\\n    --release-channel stable \\\n    --zone us-central1-c \\\n    --project $GCLOUD_PROJECT\n\n# Verify cluster\ngcloud container clusters list\n\nNAME                     LOCATION         MASTER_VERSION   MASTER_IP       MACHINE_TYPE  NODE_VERSION       NUM_NODES  STATUS\nde-remote-development-1  us-central1-c    1.26.3-gke.1000  34.136.33.199   e2-medium     1.26.3-gke.1000    3          RUNNING\n\ngcloud container clusters get-credentials $GCLOUD_CLUSTER --zone us-central1-c --project $GCLOUD_PROJECT\nFetching cluster endpoint and auth data.\nkubeconfig entry generated for de-remote-development-1.\n```\n\n1. The setup requires the [`Kubernetes Engine Admin` role in Google IAM](https://cloud.google.com/kubernetes-engine/docs/concepts/access-control?hl=en#recommendations) to create ClusterRoleBindings.\n2. Create a new Kubernetes cluster (do not use Autopilot).\n3. Ensure that [cluster autoscaling](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler?hl=en) is enabled in the GKE cluster.\n4. Verify that a [default Storage Class](https://cloud.google.com/kubernetes-engine/docs/concepts/persistent-volumes?hl=en#storageclasses) has been defined.\n5. Install an Ingress controller, for example [ingress-nginx](https://kubernetes.github.io/ingress-nginx/deploy/#gce-gke). 
Follow the documentation and run the following commands to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nkubectl create clusterrolebinding cluster-admin-binding \\\n  --clusterrole cluster-admin \\\n  --user $(gcloud config get-value account)\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.7.1/deploy/static/provider/cloud/deploy.yaml\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\ngcloud container clusters list\n\nkubectl get services -A\n```\n\n### Set up infrastructure with Amazon Elastic Kubernetes Service (EKS)\nCreating an Amazon EKS cluster requires [cluster IAM roles](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). You can use the [`eksctl` CLI for Amazon EKS](https://eksctl.io/), which automatically creates the roles. `eksctl` [requires the AWS IAM Authenticator for Kubernetes](https://github.com/weaveworks/eksctl/blob/main/README.md#prerequisite), which will get pulled with Homebrew automatically on macOS.\n\n```shell\nbrew install eksctl awscli aws-iam-authenticator\naws configure\n\neksctl create cluster --name remote-dev \\\n    --region us-west-2 \\\n    --node-type m5.xlarge \\\n    --nodes 3 \\\n    --nodes-min=1 \\\n    --nodes-max=4 \\\n    --version=1.26 \\\n    --asg-access\n```\n\nThe eksctl command uses the [`--asg-access`, `--nodes-min/max` parameters for auto-scaling](https://eksctl.io/usage/autoscaling/). The autoscaler requires [additional configuration steps](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md); alternatively, [Karpenter is supported in Amazon EKS](https://karpenter.sh/docs/getting-started/getting-started-with-karpenter/). 
Review the [autoscaling documentation](https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html), and [default Storage Class `gp2`](https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html) fulfilling the requirements. The Kubernetes configuration is automatically updated locally.\n\nInstall the [Nginx Ingress controller for EKS](https://kubernetes.github.io/ingress-nginx/deploy/#aws). Follow the documentation and run the following command to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/aws/deploy.yaml\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\neksctl get cluster --region us-west-2 --name remote-dev\n\nkubectl get services -A\n```\n\n### Set up infrastructure with Azure Managed Kubernetes Service (AKS)\nInstall [Azure CLI](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-cli).\n\n```shell\nbrew install azure-cli\n\naz login\n```\n\nReview the documentation for the [cluster autoscaler in AKS](https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler) and the [default Storage Class being `managed-csi`](https://learn.microsoft.com/en-us/azure/aks/concepts-storage#storage-classes), create a new resource group, and create a new Kubernetes cluster. 
Download the Kubernetes configuration to continue with the `kubectl` commands.\n\n```shell\naz group create --name remote-dev-rg --location eastus\n\naz aks create \\\n--resource-group remote-dev-rg \\\n--name remote-dev \\\n--node-count 1 \\\n--vm-set-type VirtualMachineScaleSets \\\n--load-balancer-sku standard \\\n--enable-cluster-autoscaler \\\n--min-count 1 \\\n--max-count 3\n\naz aks get-credentials --resource-group remote-dev-rg --name remote-dev\n```\n\nInstall the [Nginx ingress controller in AKS](https://learn.microsoft.com/en-us/azure/aks/ingress-basic?tabs=azure-cli#basic-configuration). Follow the documentation and run the following commands to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nNAMESPACE=ingress-basic\n\nhelm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx\nhelm repo update\n\nhelm install ingress-nginx ingress-nginx/ingress-nginx \\\n  --create-namespace \\\n  --namespace $NAMESPACE \\\n  --set controller.service.annotations.\"service\\.beta\\.kubernetes\\.io/azure-load-balancer-health-probe-request-path\"=/healthz\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\nkubectl get services --namespace ingress-basic -o wide -w ingress-nginx-controller\n\nkubectl get services -A\n```\n\n### Set up infrastructure with Civo Cloud Kubernetes\nInstall and configure the [Civo CLI](https://www.civo.com/docs/kubernetes/create-a-cluster#creating-a-cluster-using-civo-cli), and create a Kubernetes cluster using 2 nodes, 4 CPUs, 8 GB RAM.\n\n```shell\ncivo kubernetes create remote-dev -n 2 -s g4s.kube.large\n\ncivo kubernetes config remote-dev --save\nkubectl config use-context remote-dev\n```\n\nYou have full permissions on the cluster to create ClusterRoleBindings. 
The [default Storage Class](https://www.civo.com/docs/kubernetes/kubernetes-volumes#creating-a-persistent-volume-claim-pvc) is set to 'civo-volume'.\n\nInstall the [Nginx Ingress controller using Helm](https://kubernetes.github.io/ingress-nginx/deploy/#quick-start). Follow the documentation and run the following command to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nhelm upgrade --install ingress-nginx ingress-nginx \\\n  --repo https://kubernetes.github.io/ingress-nginx \\\n  --namespace ingress-nginx --create-namespace\n\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\ncivo kubernetes show remote-dev\n\nkubectl get services -A\n```\n\n### Set up infrastructure with self-managed Kubernetes\nThe process follows similar steps, requiring a user with permission to create `ClusterRoleBinding` resources. The [Nginx Ingress controller](https://kubernetes.github.io/ingress-nginx/deploy/#quick-start) is the fastest path forward. Once the cluster is ready, print the load balancer IP for the DNS records, and create/update A/AAAA record for wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`) pointing to the load balancer IP.\n\n## Workspaces proxy installation into Kubernetes\n_After completing the Kubernetes cluster setup with one of your preferred providers, please continue with the next steps._\n\nAdd the Helm repository for the workspaces proxy (it is using the [Helm charts feature in the GitLab package registry](https://docs.gitlab.com/ee/user/packages/helm_repository/)).\n\n```shell\nhelm repo add gitlab-workspaces-proxy \\\n  https://gitlab.com/api/v4/projects/gitlab-org%2fremote-development%2fgitlab-workspaces-proxy/packages/helm/devel\n```\n\nInstall the gitlab-workspaces-proxy, and optionally [specify the most current chart version](https://gitlab.com/gitlab-org/remote-development/gitlab-workspaces-proxy/-/blob/main/helm/Chart.yaml). 
If you are using a different ingress controller than Nginx, you need to change the `ingress.className` key. Re-run the command when new TLS certificates need to be installed.\n\n```shell\nhelm repo update\n\nhelm upgrade --install gitlab-workspaces-proxy \\\n  gitlab-workspaces-proxy/gitlab-workspaces-proxy \\\n  --version 0.1.6 \\\n  --namespace=gitlab-workspaces \\\n  --create-namespace \\\n  --set=\"auth.client_id=${CLIENT_ID}\" \\\n  --set=\"auth.client_secret=${CLIENT_SECRET}\" \\\n  --set=\"auth.host=${GITLAB_URL}\" \\\n  --set=\"auth.redirect_uri=${REDIRECT_URI}\" \\\n  --set=\"auth.signing_key=${SIGNING_KEY}\" \\\n  --set=\"ingress.host.workspaceDomain=${GITLAB_WORKSPACES_PROXY_DOMAIN}\" \\\n  --set=\"ingress.host.wildcardDomain=${GITLAB_WORKSPACES_WILDCARD_DOMAIN}\" \\\n  --set=\"ingress.tls.workspaceDomainCert=$(cat ${WORKSPACES_DOMAIN_CERT})\" \\\n  --set=\"ingress.tls.workspaceDomainKey=$(cat ${WORKSPACES_DOMAIN_KEY})\" \\\n  --set=\"ingress.tls.wildcardDomainCert=$(cat ${WILDCARD_DOMAIN_CERT})\" \\\n  --set=\"ingress.tls.wildcardDomainKey=$(cat ${WILDCARD_DOMAIN_KEY})\" \\\n  --set=\"ingress.className=nginx\"\n```\n\nThe chart installs and configures the ingress automatically. You can verify the setup by getting the `Ingress` resource type:\n\n```shell\nkubectl get ingress -n gitlab-workspaces\n\nNAME                      CLASS   HOSTS                             ADDRESS   PORTS     AGE\ngitlab-workspaces-proxy   nginx   remote-dev.dev,*.remote-dev.dev             80, 443   9s\n```\n\n### Agent for Kubernetes installation\nCreate the agent configuration file in `.gitlab/agents/\u003Cagentname>/config.yaml`, add to git, and push it into the repository. The `remote_development` key specifies the `dns_zone`, which must be set to the workspaces domain. Additionally, the integration needs to be enabled. 
The `observability` key intentionally configures [debug logging](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html#debug-the-agent) for the first setup to troubleshoot faster. You can adjust the `logging` levels for production usage.\n\n```shell\nexport GL_AGENT_K8S=remote-dev-dev\n\n$ mkdir agent-kubernetes && cd agent-kubernetes\n$ mkdir -p .gitlab/agents/${GL_AGENT_K8S}/\n\n$ cat \u003C\u003CEOF >.gitlab/agents/${GL_AGENT_K8S}/config.yaml\nremote_development:\n    enabled: true\n    dns_zone: \"${GITLAB_WORKSPACES_PROXY_DOMAIN}\"\n\nobservability:\n  logging:\n    level: debug\n    grpc_level: warn\nEOF\n\n$ git add .gitlab/agents/${GL_AGENT_K8S}/config.yaml\n$ git commit -avm \"Add agent for Kubernetes configuration\"\n# adjust the URL to your GitLab server URL and project path\n$ git remote add origin https://gitlab.example.com/remote-dev-workspaces/agent-kubernetes.git\n# will create a private project when https/PAT is used\n$ git push\n```\n\nOpen the GitLab project in your browser, navigate into `Operate > Kubernetes Clusters`, and click the `Connect a new cluster (agent)` button. Select the agent from the configuration dropdown, and click `Register`. The form generates a ready-to-use Helm chart CLI command. Similar to the command below, replace `XXXXXXXXXXREPLACEME` with the actual token value.\n\n```shell\nhelm repo add gitlab https://charts.gitlab.io\nhelm repo update\nhelm upgrade --install remote-dev-dev gitlab/gitlab-agent \\\n    --namespace gitlab-agent-remote-dev-dev \\\n    --create-namespace \\\n    --set image.tag=v16.0.1 \\\n    --set config.token=XXXXXXXXXXREPLACEME \\\n    --set config.kasAddress=wss://kas.gitlab.com # Replace with your self-managed GitLab KAS instance URL if not using GitLab.com SaaS\n```\n\nRun the commands, and verify that the agent is connected in the `Operate > Kubernetes Clusters` overview. 
You can access the pod logs using the following command:\n\n```shell\n$ kubectl get ns\nNAME                          STATUS   AGE\ngitlab-agent-remote-dev-dev   Active   9d\ngitlab-workspaces             Active   22d\n...\n\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-agent -n gitlab-agent-$GL_AGENT_K8S\n```\n\n_Congrats! Your infrastructure setup for on-demand, cloud-based development environments is complete._\n\n## Workspaces creation\nAfter completing the infrastructure setup, you must verify that all components work together and users can create workspaces. You can fork or import the [`example-python-http-simple` project](https://gitlab.com/gitlab-de/use-cases/remote-development/example-python-http-simple) into your GitLab group with access to the GitLab agent for Kubernetes to try it immediately. The project provides a simple Python web app with Flask that provides different HTTP routes. Alternatively, start with a new project and create a `.devfile.yaml` with the [example configuration](https://docs.gitlab.com/ee/user/workspace/#example-configurations).\n\nOptional: Inspect the [`.devfile.yaml`](https://docs.gitlab.com/ee/user/workspace/#devfile) file to learn about the configuration format. We will look into the `image` key later.\n\n```yaml\nschemaVersion: 2.2.0\ncomponents:\n  - name: py\n    attributes:\n      gl/inject-editor: true\n    container:\n      # Use a custom image that supports arbitrary user IDs.\n      # NOTE: THIS IMAGE IS NOT ACTIVELY MAINTAINED. 
DEMO USE CASES ONLY, DO NOT USE IN PRODUCTION.\n      # Source: https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id\n      image: registry.gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id:latest\n      memoryRequest: 1024M\n      memoryLimit: 2048M\n      cpuRequest: 500m\n      cpuLimit: 1000m\n      endpoints:\n        - name: http-python\n          targetPort: 8080\n```\n\n### Create the first workspaces\nNavigate to the `Your Work > Workspaces` menu and create a new workspace. Search for the project name, select the agent for Kubernetes, and create the workspace.\n\n![GitLab remote development workspaces, Python example](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python.png){: .shadow}\n\nOpen two terminals to follow the workspaces proxy and agent logs in the Kubernetes cluster.\n\n```shell\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-workspaces-proxy -n gitlab-workspaces\n\n{\"level\":\"info\",\"ts\":1686331102.886607,\"caller\":\"server/server.go:74\",\"msg\":\"Starting proxy server...\"}\n{\"level\":\"info\",\"ts\":1686331133.146862,\"caller\":\"upstream/tracker.go:47\",\"msg\":\"New upstream added\",\"host\":\"8080-workspace-62029-5534214-2vxdxq.remote-dev.dev\",\"backend\":\"workspace-62029-5534214-2vxdxq.gl-rd-ns-62029-5534214-2vxdxq\",\"backend_port\":8080}\n2023/06/09 17:21:10 getHostnameFromState state=https://60001-workspace-62029-5534214-2vxdxq.remote-dev.dev/folder=/projects/demo-python-http-simple\n```\n\n```shell\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-agent -n gitlab-agent-$GL_AGENT_K8S\n\n{\"level\":\"debug\",\"time\":\"2023-06-09T18:36:19.839Z\",\"msg\":\"Applied event\",\"mod_name\":\"remote_development\",\"apply_event\":\"WaitEvent{ GroupName: \\\"wait-0\\\", Status: \\\"Pending\\\", Identifier: 
\\\"gl-rd-ns-62029-5534214-k66cjy_workspace-62029-5534214-k66cjy-gl-workspace-data__PersistentVolumeClaim\\\" }\",\"agent_id\":62029}\n{\"level\":\"debug\",\"time\":\"2023-06-09T18:36:19.866Z\",\"msg\":\"Received update event\",\"mod_name\":\"remote_development\",\"workspace_namespace\":\"gl-rd-ns-62029-5534214-k66cjy\",\"workspace_name\":\"workspace-62029-5534214-k66cjy\",\"agent_id\":62029}\n{\"level\":\"debug\",\"time\":\"2023-06-09T18:36:43.627Z\",\"msg\":\"Applied event\",\"mod_name\":\"remote_development\",\"apply_event\":\"WaitEvent{ GroupName: \\\"wait-0\\\", Status: \\\"Successful\\\", Identifier: \\\"gl-rd-ns-62029-5534214-k66cjy_workspace-62029-5534214-k66cjy_apps_Deployment\\\" }\",\"agent_id\":62029}\n```\n\nWait until the workspace is provisioned successfully, and click to open the HTTP URL, example format `https://60001-workspace-62029-5534214-2vxdxq.remote-dev.dev/?folder=%2Fprojects%2Fexample-python-http-simple`. The GitLab OAuth application will ask you for authorization.\n\n![GitLab OAuth provider app, example with the Developer Evangelism demo environment](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_oauth_app.png){: .shadow}\n\nYou can select the Web IDE menu, open a new terminal (`cmd shift p` and search for `terminal create`). More shortcuts and Web IDE usage are documented [here](https://docs.gitlab.com/ee/user/project/web_ide/).\n\n![GitLab remote development workspaces, Python example, create terminal](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python_web_ide_create_terminal.png){: .shadow}\n\nUsing the Python example project, try to run the `hello.py` file with the Python interpreter after changing the terminal to `bash` to access auto-completion and shell history. 
Type `pyth`, press tab, type `hel`, press tab, enter.\n\n```shell\n$ bash\n\n$ python hello.py\n```\n\nThe command will fail because the Python requirements still need to be installed. Let us fix that by running the following command:\n\n```shell\n$ pip install -r requirements.txt\n```\n\n![GitLab remote development workspaces, Python example, install requirements in the terminal](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python_web_ide_terminal_install_pip.png){: .shadow}\n\n**Note**: This example is intentionally kept simple, and does not use best practices with `pyenv` for managing Python environments. We will explore development environment templates in future blog posts.\n\nRun the Python application `hello.py` again to start the web server on port 8080.\n\n```shell\n$ python hello.py\n```\n\nYou can access the exposed port by modifying the URL from the default port at the beginning of the URL to the exposed port `8080`. The `?folder` URL parameter can also be removed.\n\n```diff\n-https://60001-workspace-62029-5534214-kbtcmq.remote-dev.dev/?folder=/projects/example-python-http-simple\n+https://8080-workspace-62029-5534214-kbtcmq.remote-dev.dev/\n```\n\nThe URL is not publicly available and requires access through the GitLab OAuth session.\n\n![GitLab remote development workspaces, Python example, run webserver, access HTTP](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python_web_ide_terminal_run_webserver_access_http.png){: .shadow}\n\nModifying the workspace requires custom container images supporting to run with [arbitrary user IDs](https://docs.gitlab.com/ee/user/workspace/#arbitrary-user-ids). The example project uses a custom image which allows to install Python dependencies and create build artifacts. It also allows to use the bash terminal shown above. 
Learn more about custom image creation in the next section.\n\n### Custom workspace container images\nCustom container images require support for [arbitrary user IDs](https://docs.gitlab.com/ee/user/workspace/#arbitrary-user-ids). You can build custom container images with [GitLab CI/CD](/solutions/continuous-integration/) and use the [GitLab container registry](https://docs.gitlab.com/ee/user/packages/container_registry/) to distribute the container images on the DevSecOps platform.\n\nWorkspaces run with arbitrary user IDs in the Kubernetes cluster containers and manage resource access with Linux group permissions. Existing container images may need to be changed, and imported as base image for new container images. The [following example](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/-/blob/main/Dockerfile) uses the `python:3.11-slim-bullseye` image from Docker Hub as a base container image in the `FROM` key. The next steps create and set a home directory in `/home/gitlab-workspaces`, and manage user and group access to specified directories. Additionally, you can install more convenience tools and configurations into the image, for example the `git` package.\n\n[`Dockerfile`](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/-/blob/main/Dockerfile)\n```\n# Example demo for a Python-based container image.\n# NOTE: THIS IMAGE IS NOT ACTIVELY MAINTAINED. DEMO USE CASES ONLY, DO NOT USE IN PRODUCTION.\n\nFROM python:3.11-slim-bullseye\n\n# User id for build time. Runtime will be an arbitrary random ID.\nRUN useradd -l -u 33333 -G sudo -md /home/gitlab-workspaces -s /bin/bash -p gitlab-workspaces gitlab-workspaces\n\nENV HOME=/home/gitlab-workspaces\n\nWORKDIR $HOME\n\nRUN mkdir -p /home/gitlab-workspaces && chgrp -R 0 /home && chmod -R g=u /etc/passwd /etc/group /home\n\n# TODO: Add more convenience tools into the user home directory, i.e. 
enable color prompt for the terminal, install pyenv to manage Python environments, etc\nRUN apt update && \\\n    apt -y --no-install-recommends install git procps findutils htop vim curl wget && \\\n    rm -rf /var/lib/apt/lists/*\n\nUSER gitlab-workspaces\n```\n\n **As an exercise**, [fork the project](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id) and modify the package installation step in the `Dockerfile` file to install the `dnsutils` package on the Debian based image to get access to the `dig` command.\n\n[`Dockerfile`](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/-/blob/main/Dockerfile)\n```diff\n-RUN apt update && \\\n-    apt -y --no-install-recommends install git procps findutils htop vim curl wget && \\\n-    rm -rf /var/lib/apt/lists/*\n+RUN apt update && \\\n+    apt -y --no-install-recommends install git procps findutils htop vim curl wget dnsutils && \\\n+    rm -rf /var/lib/apt/lists/*\n```\n\n[Build the container image](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html) with your preferred CI/CD workflow. On GitLab.com SaaS, you can include the `Docker.gitlab-ci.yml` template which takes care of building the image.\n\n```yaml\ninclude:\n    - template: Docker.gitlab-ci.yml\n```\n\nWhen building the container images manually, use Linux and `amd64` as platform architecture [until multi-architecture support is available for running workspaces](https://gitlab.com/groups/gitlab-org/-/epics/10594). Also, review the [optimizing images guide in the documentation](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html#optimize-docker-images) when creating custom container images to optimize size and build times.\n\nNavigate into `Deploy > Container Registry` in the GitLab UI and copy the image URL from the tagged image. 
Open the `.devfile.yaml` file in the forked GitLab project `example-python-http-simple`, and change the `image` path to the newly built image URL.\n\n[`.devfile.yaml`](https://gitlab.com/gitlab-de/use-cases/remote-development/example-python-http-simple/-/blob/main/.devfile.yaml)\n```diff\n-      image: registry.gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id:latest\n+      image: registry.gitlab.example.com/remote-dev-workspaces/python-remote-dev-workspaces-user-id:latest\n```\n\nNavigate into `Your Work > Workspaces` and create a new workspace for the project, and try to execute the `dig` command to query the IPv6 address of GitLab.com (or any other internal domain).\n\n```shell\n$ dig +short gitlab.com AAAA\n```\n\nThe custom container image project is located [here](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/).\n\n## Tips\nThis blog post's setup steps with environment variables are easy to follow. 
For production usage, use automation to manage your environment with Terraform, Ansible, etc.\n\n- Terraform: [Provision a GKE Cluster (Google Cloud)](https://developer.hashicorp.com/terraform/tutorials/kubernetes/gke), [Provision an EKS Cluster (AWS)](https://developer.hashicorp.com/terraform/tutorials/kubernetes/eks), [Provision an AKS Cluster (Azure)](https://developer.hashicorp.com/terraform/tutorials/kubernetes/aks), [Deploy Applications with the Helm Provider](https://developer.hashicorp.com/terraform/tutorials/kubernetes/helm-provider)\n- Ansible: [google.cloud.gcp_container_cluster module](https://docs.ansible.com/ansible/latest/collections/google/cloud/gcp_container_cluster_module.html), [community.aws.eks_cluster module](https://docs.ansible.com/ansible/latest/collections/community/aws/eks_cluster_module.html), [azure.azcollection.azure_rm_aks module](https://docs.ansible.com/ansible/latest/collections/azure/azcollection/azure_rm_aks_module.html), [kubernetes.core collection](https://docs.ansible.com/ansible/latest/collections/kubernetes/core/index.html#plugin-index)\n\n### Certificate management\nThe workspaces domain requires a valid TLS certificate. The examples above used certbot with Let's Encrypt, requiring a certificate renewal after three months. Depending on your corporate requirements, you may need to create TLS certificates signed by the corporate CA identity and manage the certificates. Alternatively, you can look into solutions like [cert-manager for Kubernetes](https://cert-manager.io/docs/getting-started/) that will help renew certificates automatically.\n\nDo not forget to add TLS certificate validity monitoring to avoid unforeseen errors. 
The [blackbox exporter for Prometheus](https://github.com/prometheus/blackbox_exporter) can help with monitoring TLS certificate expiry and send alerts.\n\n### Troubleshooting\nHere are a few tips for troubleshooting connections and inspecting the cluster resources.\n\n#### Verify the connections\nTry to connect to the workspaces domain to see whether the Kubernetes Ingress controller responds to HTTP requests.\n\n```shell\n$ curl -vL ${GITLAB_WORKSPACES_PROXY_DOMAIN}\n```\n\nInspect the logs of the proxy deployment to follow connection requests. Since the proxy requires an authorization token sent via the OAuth app, an HTTP 400 error is expected for unauthenticated curl requests.\n\n```shell\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-workspaces-proxy -n gitlab-workspaces\n```\n\nCheck if the TLS certificate is valid. You can also use `sslcan` and other tools.\n\n```shell\n$ openssl s_client -connect ${GITLAB_WORKSPACES_PROXY_DOMAIN}:443\n\n$ sslcan ${GITLAB_WORKSPACES_PROXY_DOMAIN}\n```\n\n[Debug the agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html#debug-the-agent) and inspect the pod logs.\n\n```shell\n$ kubectl get ns\n\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-agent -n gitlab-agent-\u003CNAMESPACENAME>\n```\n\n#### Workspaces cannot be created even if the agent is connected\nWhen the workspaces deployment is spinning and nothing happens, try restarting the workspaces proxy and agent for Kubernetes. This is a known problem and tracked [in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/414399#note_1426652421).\n\n```shell\n$ kubectl rollout restart deployment -n gitlab-workspaces\n\n$ kubectl rollout restart deployment -n gitlab-agent-$GL_AGENT_K8S\n```\n\nIf the agent for Kubernetes remains unresponsive, consider a complete reinstall. 
First, navigate into the GitLab UI into `Operate > Kubernetes Clusters` and [delete the agent](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html#remove-an-agent-through-the-gitlab-ui). Next, use the following commands to delete the Helm release from the cluster, and run the installation command generated from the UI again.\n\n```shell\nkubectl get ns\nhelm list -A\n\nexport RELEASENAME=xxx\nexport NAMESPACENAME=xxx\nexport TOKEN=XXXXXXXXXXREPLACEME\nhelm uninstall ${RELEASENAME} -n gitlab-agent-${NAMESPACENAME}\n\nhelm repo add gitlab https://charts.gitlab.io\nhelm repo update\n\nhelm upgrade --install ${RELEASENAME} gitlab/gitlab-agent \\\n    --namespace gitlab-agent-${NAMESPACENAME} \\\n    --create-namespace \\\n    --set image.tag=v16.1.2 \\\n    --set config.token=${TOKEN} \\\n    --set config.kasAddress=wss://kas.gitlab.com # Replace with your self-managed GitLab KAS instance URL if not using GitLab.com SaaS\n```\n\nExample: `helm uninstall remote-dev-dev -n gitlab-agent-remote-dev-dev`\n\n#### Cannot modify workspace using custom images\nIf you cannot modify the workspace, open a new terminal and check the user id and their groups.\n\n```shell\n$ id\n```\n\nInspect the `.devfile.yaml` file in the project and extract the `image` attribute to test the used container image. You can use container CLI, for example `docker` that runs the container with a different user ID. Note: You can use any user ID to test the behavior.\n\nTip: Use grep and cut commands to extract the image attribute URL from the `.devfile.yaml`.\n\n```shell\n$ cat .devfile.yaml | grep image: | cut -f2 -d ':')\n```\n\nRun the following command to execute the `id` command in the container, and print the user information.\n\n```\n$ docker run -u 1234 -ti registry.gitlab.com/path/to/project/image:tagname id\n```\n\nTry to modify the workspace by running the command `echo 'Hi' >> ~/example.md`. 
This can fail with a permission error.\n\n```shell\n$ docker run -u 1234 -ti registry.gitlab.com/path/to/project/image:tagname echo 'Hi' >> ~/example.md\n```\n\nIf the above command failed, the Linux user group does not have enough permissions to modify the file. You can view the permissions using the `ls` command.\n\n```shell\n$ docker run -u 1234 -ti registry.gitlab.com/path/to/project/image:tagname ls -lart ~/\n```\n\n### Contribute\nThe [remote development developer documentation](https://gitlab.com/gitlab-org/remote-development/gitlab-remote-development-docs) provides insights into the [architecture blueprint](https://docs.gitlab.com/ee/architecture/blueprints/remote_development/) and how to set up a local development environment to [start contributing](/community/contribute/). In the future, we will be able to use remote development workspaces to develop remote development workspaces.\n\n## Share your feedback\nIn this blog post, you have learned how to manage the infrastructure for remote development workspaces, create your first workspace, and more tips on custom workspace images and troubleshooting. Using the same development environment across organizations and communities, developers can focus on writing code and get fast preview feedback (i.e., by running a web server that can be accessed externally in the remote workspace). Providing the same reproducible environment also helps opensource contributors to reproduce bugs and provide feedback most efficiently. They can use the same best practices as upstream maintainers.\n\nDevelopers and DevOps engineers will be using the Web IDE in workspaces. 
Later, being able to [connect their desktop client to workspaces](https://gitlab.com/groups/gitlab-org/-/epics/10478), they can take advantage of even more efficiency with the [most comprehensive AI-powered DevSecOps platform](/gitlab-duo/): Code suggestions and more AI-powered workflows are just one fingertip away.\n\nWhat will your teams build with remote development workspaces? Please share your experiences in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/410031), blog about your setup, and join our [community forum](https://forum.gitlab.com/) for more discussions.\n\nCover image by [Nick Karvounis](https://unsplash.com/@nickkarvounis) on [Unsplash](https://unsplash.com/photos/SmIM3m8f3Pw)",[479,813,9,683,1041],{"slug":7285,"featured":6,"template":686},"set-up-infrastructure-for-cloud-development-environments","content:en-us:blog:set-up-infrastructure-for-cloud-development-environments.yml","Set Up Infrastructure For Cloud Development Environments","en-us/blog/set-up-infrastructure-for-cloud-development-environments.yml","en-us/blog/set-up-infrastructure-for-cloud-development-environments",{"_path":7291,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7292,"content":7298,"config":7304,"_id":7306,"_type":14,"title":7307,"_source":16,"_file":7308,"_stem":7309,"_extension":19},"/en-us/blog/sfdx-promo-trailhead-blog",{"title":7293,"description":7294,"ogTitle":7293,"ogDescription":7294,"noIndex":6,"ogImage":7295,"ogUrl":7296,"ogSiteName":670,"ogType":671,"canonicalUrls":7296,"schema":7297},"Salesforce developers can now use GitLab for complete DevOps","Learn what’s possible with GitLab and Salesforce, whether you’re looking to reduce cycle time or increase collaboration across cross-functional teams.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680624/Blog/Hero%20Images/gitlab-salesforce.png","https://about.gitlab.com/blog/sfdx-promo-trailhead-blog","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Salesforce developers can now use GitLab's single application for the DevOps lifecycle\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mayank Tahilramani\"}],\n        \"datePublished\": \"2019-05-29\",\n      }",{"title":7299,"description":7294,"authors":7300,"heroImage":7295,"date":7301,"body":7302,"category":299,"tags":7303},"Salesforce developers can now use GitLab's single application for the DevOps lifecycle",[4010],"2019-05-29","\n\nGreat news – we're partnering with Salesforce to offer developers more [agile delivery practices](/solutions/agile-delivery/) and [increased automation](https://docs.gitlab.com/ee/topics/autodevops/) throughout the DevOps lifecycle. Developers can leverage the following capabilities from GitLab for Salesforce DX: [Source Code Management (SCM)](/solutions/source-code-management/), [Continuous Integration (CI) and Continuous Delivery (CD)](/solutions/continuous-integration/), [Project Management](https://about.gitlab.com/solutions/agile-delivery/), and [much more](/pricing/feature-comparison/). Many businesses already run on Salesforce and are just starting to explore the latest and greatest that Salesforce DX has to offer in terms of tooling and functionality to rapidly build apps and enhancements onto their platform. Our partnership with Salesforce will help our customers to reduce cycle times and [deliver business value at the speed of business](/blog/align-business-strategy-and-app-delivery/).\n\n## How GitLab + Salesforce DX work together\n\nSalesforce has been investing in APIs and platform features that allow developers to create and administer Salesforce-connected apps in a more direct and efficient way. 
With GitLab, developers can now supplement Salesforce development tools through a [templatized CI/CD pipeline](https://gitlab.com/sfdx/sfdx-cicd-template) which leverages [Scratch Orgs](https://developer.salesforce.com/docs/atlas.en-us.sfdx_dev.meta/sfdx_dev/sfdx_dev_scratch_orgs.htm) and [packaging](https://developer.salesforce.com/docs/atlas.en-us.sfdx_dev.meta/sfdx_dev/sfdx_dev_dev2gp_plan_pkg_types.htm) for a structured and frictionless development experience. It’s easier than ever to get started with new Salesforce projects using our new [Salesforce project template](https://gitlab.com/sfdx/sfdx-project-template), and adopt a collaborative development workflow across teams.\n\nFor example, Salesforce development projects stored in GitLab SCM enable developers to work in a Git-based workflow similar to [GitLab Flow](https://docs.gitlab.com/ee/topics/gitlab_flow.html), while transparently collaborating through [issue tracking](https://docs.gitlab.com/ee/user/project/issues/) and [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/). In this case, each developer can seamlessly create an ad hoc Git branch focused on specific development and functionality of code, allowing for small but continuous incremental changes. Each commit within a branch goes through automated testing and deployment into a Scratch Org.\n\nUpon approval, the changes are then packaged and deployed into a Sandbox Org and, ultimately, into production. GitLab CI/CD aims to automate the use of packaging for predictable Salesforce deployments. In this workflow example, each developer is able to leverage short-lived Scratch Orgs for development and testing of code in a much more Agile fashion.\n\n![Tanuki Badge](https://about.gitlab.com/images/blogimages/gitlab-salesforce-tanuki-badge.png){: .small.right.wrap-text}\n\n## Get hands on with a new Trailhead module and promo\n\nLearning by doing is the best way to get started. 
Check out our new Trailhead Module, \"[Build an automated CI/CD pipeline with GitLab](https://trailhead.salesforce.com/content/learn/projects/automate-cicd-with-gitlab)\" and learn how to automate your Salesforce development to increase productivity. Earn your Salesforce/GitLab Tanuki badge today!\n\n[Get started now.](/free-trial/)\n{: .alert .alert-gitlab-purple .text-center}\n",[9,231,726],{"slug":7305,"featured":6,"template":686},"sfdx-promo-trailhead-blog","content:en-us:blog:sfdx-promo-trailhead-blog.yml","Sfdx Promo Trailhead Blog","en-us/blog/sfdx-promo-trailhead-blog.yml","en-us/blog/sfdx-promo-trailhead-blog",{"_path":7311,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7312,"content":7318,"config":7323,"_id":7325,"_type":14,"title":7326,"_source":16,"_file":7327,"_stem":7328,"_extension":19},"/en-us/blog/shifting-from-on-prem-to-cloud",{"title":7313,"description":7314,"ogTitle":7313,"ogDescription":7314,"noIndex":6,"ogImage":7315,"ogUrl":7316,"ogSiteName":670,"ogType":671,"canonicalUrls":7316,"schema":7317},"Shifting from on-prem to cloud","The challenges of being on-prem and what to consider when shifting to public cloud.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679664/Blog/Hero%20Images/on-prem-to-cloud.jpg","https://about.gitlab.com/blog/shifting-from-on-prem-to-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Shifting from on-prem to cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-01-09\",\n      }",{"title":7313,"description":7314,"authors":7319,"heroImage":7315,"date":7320,"body":7321,"category":679,"tags":7322},[788],"2020-01-09","\n\nCloud computing and cloud adoption are perennial topics when talking about scalability and growth, but many enterprises still operate a significant portion of their workloads in legacy environments. 
With so much information on the reduced infrastructure costs and the elasticity of public cloud, why do organizations still do all the work themselves?\n\nIn this discussion with Sr. Product Marketing Manager [William Chia](/company/team/#williamchia), we talk about the challenges traditional IT teams face, the barriers to cloud adoption, and strategies to consider for making the leap.\n\n\n## Why organizations use traditional IT\n\nThe reasons that an organization may want to manage their own infrastructure are myriad and geared toward unique needs and/or limitations within their organization.\n\n\n### Regulatory/Compliance\n\nIn highly-regulated industries such as banking and healthcare, or even government entities, there may be compliance concerns or risks that prevent them from utilizing public cloud. More control means more oversight and more accountability. \"If I need to keep patient data private to comply with HIPAA, for example, if I keep 100% control of my systems and infrastructure I can ensure I comply. If I outsource to cloud services then I have to take different steps to ensure I'm not leaking PII,\" says William. Even though the big cloud providers – namely GCP, AWS, and Azure – have compliance built-in, some organizations may still be hesitant to have them assume those risks.\n\n\n### Protecting sensitive data\n\nIT leaders surveyed in a [Cloud Security Alliance report](https://www.skyhighnetworks.com/cloud-security-blog/5-surprising-truths-from-the-cloud-security-alliances-latest-survey/) expressed that, while they are confident in cloud security capabilities, there are things that can go wrong beyond their control: Inside threats, compromised accounts, and misconfigured security settings up the stack that can all lead to security breaches. 
According to nearly 68% of the IT leaders surveyed, the ability to enforce corporate security policies is the [number one barrier to moving applications to the cloud](https://www.skyhighnetworks.com/cloud-security-blog/5-surprising-truths-from-the-cloud-security-alliances-latest-survey/). \"The top-level concern basically comes down to control and data privacy,\" says William.\n\n\n### Better costs\n\nFor companies operating at a small scale, cloud computing’s pay-per-use model will almost always be cheaper than managing your own data centers, but for larger-scale organizations that isn’t always the case. \"There's a breaking point… If you run on-prem, it actually could be cheaper than your cloud bill at huge scale, but you’re running so much software you’re basically running your own private cloud at that point,\" says William. For a long-term strategy, organizations have to weigh their CapEx vs OpEx costs, and while [CapEx involves a large upfront expense in whole systems and servers](https://www.10thmagnitude.com/opex-vs-capex-the-real-cloud-computing-cost-advantage/), and the continued cost of maintenance, the computing volume could make this a worthwhile investment.\n\nAnother reason that companies may run their own infrastructure is because that’s how they’ve always done it. While not a very scientific answer, it’s the reality for many companies, especially those that grew before the age of cloud.\n\n\"Once upon a time, if you were a large enterprise and you had to run a lot of software, you had no choice but to manage it all yourself. And so now you have all these servers, you have all of these staff, and you have all of these business processes. You have a great deal of both physical and logical infrastructure and if you want to move to the cloud you have to change all of it. That comes at a very high cost,\" says William.\n\nIn the past, moving small amounts of data was relatively easy. 
When we start talking about exabytes of data, rather than terabytes of data, the process of migration becomes herculean. According to Jean-Luc Valente, the VP for product management in the cloud platforms and solutions group at Cisco, egressing that kind of data to a public cloud could [cost as much as $30 million dollars](https://www.zdnet.com/article/multicloud-is-here-but-challenges-remain/).\n\n\n\n\n\n## The challenges of on-prem infrastructure\n\nWhile organizations may have specific reasons for running on-premises infrastructure, that decision comes with distinct challenges.\n\n\n### Range of expertise\n\n\"Above a certain level, you are managing all of your infrastructure and you're managing all of your uptime. That's a lot of expertise. You need to become as good at operating a cloud infrastructure as Amazon or Google is, which is why those public clouds are so radically popular. In order to get there requires a lot of resources,\" says William.\n\n\n### Managing software and hardware\n\nIn order to manage uptime and security, operations teams need to perform software maintenance like upgrades and patches in addition to managing physical assets like servers, racks, power supplies, and network switches. At a certain point, an organization is devoting a lot of resources to just keeping things running rather than innovating, so all of these resources are being invested in undifferentiated engineering.\n\n\n### Undifferentiated engineering\n\nIf it is not a core competency for your organization, then it’s undifferentiated engineering. \"If you don't need to manage that on-premises data center or servers for a specific reason, then the cloud is more attractive because that's a high cost,\" says William. 
\"You're spending a lot of engineering dollars on things that are not differentiating you in the marketplace.\"\n\n\n## Strategies for shifting to cloud\n\n\n### The benefits of \"lift-and-shift\"\n\nIn previous posts, we’ve talked about [legacy and monolithic applications acting as a barrier](/blog/cloud-adoption-roadmap/) for cloud adoption, but there can be some benefit to lifting and shifting those applications to the cloud. While you may not be able to take full advantage of microservices and cloud native application development, shifting those applications to the cloud does provide the benefit of reducing your operational overhead. This can provide an opportunity to learn new competencies.\n\n\"There's a separate set of competencies that you need to acquire to start running in the cloud. You don’t need to learn everything all at once. If you take a monolithic, on-premises app, simply lift-and-shift it into a VM in the cloud, that allows you to start to understand things like cloud billing, and gain some of the competencies of a cloud deployment pattern,\" says William.\n\n\n### Hybrid cloud\n\nMany organizations have opted to use both private and public cloud for a hybrid cloud infrastructure. These hybrid clouds blend the control and security of a private cloud, but also the flexibility and agility of public cloud. During periods of high usage, organizations can leverage public cloud’s pay-per-use model and save themselves from needing additional infrastructure. Organizations can use their private cloud for sensitive data and public cloud for developing and testing new applications. Having a hybrid cloud environment allows teams to manage their on-premises infrastructure and take advantage of public cloud scale.\n\n\nWhile cloud adoption is widespread, many organizations have unique reasons to stay or migrate to an on-premises infrastructure. Cost, control, and risk mitigation continue to be the main drivers of on-prem vs. cloud decisions. 
Public cloud’s pay-per-use model may not be more cost effective for organizations that operate at higher scale, but a hybrid cloud model can offer organizations the flexibility to use public cloud during periods of high usage without having to invest in additional infrastructure. Both on-prem and cloud require unique and extensive operational competencies, so teams will need leaders that are skilled in these areas when making the switch.\n\n\nCover image by [Matt Howard](https://unsplash.com/@thematthoward?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/journey?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,1041],{"slug":7324,"featured":6,"template":686},"shifting-from-on-prem-to-cloud","content:en-us:blog:shifting-from-on-prem-to-cloud.yml","Shifting From On Prem To Cloud","en-us/blog/shifting-from-on-prem-to-cloud.yml","en-us/blog/shifting-from-on-prem-to-cloud",{"_path":7330,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7331,"content":7337,"config":7342,"_id":7344,"_type":14,"title":7345,"_source":16,"_file":7346,"_stem":7347,"_extension":19},"/en-us/blog/simple-kubernetes-management-with-gitlab",{"title":7332,"description":7333,"ogTitle":7332,"ogDescription":7333,"noIndex":6,"ogImage":7334,"ogUrl":7335,"ogSiteName":670,"ogType":671,"canonicalUrls":7335,"schema":7336},"Simple Kubernetes management with GitLab","Follow our tutorial to provision a Kubernetes cluster and manage it with IAC using Terraform and Helm in 20 minutes or less.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670037/Blog/Hero%20Images/auto-deploy-google-cloud.jpg","https://about.gitlab.com/blog/simple-kubernetes-management-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Simple Kubernetes management with GitLab\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2022-11-15\",\n      }",{"title":7332,"description":7333,"authors":7338,"heroImage":7334,"date":7339,"body":7340,"category":791,"tags":7341},[5941],"2022-11-15","\n\nKubernetes can be very complex and has dozens of tutorials out there on how to provision and manage a cluster. This tutorial aims to provide a simple, lightweight solution to provision a Kubernetes cluster and manage it with infrastructure as code (IaC) using Terraform and Helm in 20 minutes or less.\n\n**The final product of this tutorial will be two IaC repositories with fully functional CI/CD pipelines:**\n\n1. [gitlab-terraform-k8s](https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks) - A single source of truth to provision, configure, and manage your Kubernetes infrastructure using Terraform\n1. [cluster-management](https://gitlab.com/gitlab-org/project-templates/cluster-management) - A single source of truth to define the desired state of your Kubernetes cluster using the GitLab Agent for Kubernetes and Helm\n\n![Final Product](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/final-product.png){: .shadow}\n\n\n### Prerequisites\n- AWS or GCP account with permissions to provision resources\n- GitLab account \n- Access to a GitLab Runner\n- 20 minutes\n\n### An overview of this tutorial is as follows:\n\n1. Set up the GitLab Terraform Kubernetes Template 🏗️\n2. Register the GitLab Agent 🕵️\n3. Add in Cloud Credentials ☁️🔑\n4. Set up the Kubernetes Cluster Management Template 🚧\n5. Enjoy your Kubernetes Cluster completely managed in code! 👏\n\n## Set up the GitLab Terraform Kubernetes Template\n\nStart by importing the example project by URL - [https://gitlab.com/projects/new#import_project](https://gitlab.com/projects/new#import_project)\n\nTo import the project:\n\n1. In GitLab, on the top bar, select **Main menu > Projects > View all projects**.\n2. 
On the right of the page, select **New project**.\n3. Select **Import project**.\n4. Select **Repository by URL**.\n5. For the Git repository URL:\n- [GCP Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine): https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-gke.git\n- [AWS Elastic Kubernetes Service](https://aws.amazon.com/eks/): https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks.git\n6. Complete the fields and select **Create project**.\n\n## Register the GitLab Agent\n\nWith your newly created **gitlab-terraform-k8s** repo, create a GitLab Agent for Kubernetes:\n\n1. On the left sidebar, select **Infrastructure > Kubernetes clusters**. Select **Connect a cluster (agent).**\n2. From the **Select an agent** dropdown list, select **eks-agent/gke-agent** and select **Register an agent**.\n3. GitLab generates a registration token for the agent. **Securely store this secret token, as you will need it later.**\n4. GitLab provides an address for the agent server (KAS). Securely store this as you will also need it later.\n5. Add this to the **gitlab-terraform-eks/.gitlab/agents/eks-agent/config.yaml** in order to allow the GitLab Agent to have access to your entire group.\n\n```yaml\nci_access:\n  groups:\n    - id: your-namespace-here\n```\n\n![Register GitLab Agent](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/register-gitlab-agent.png){: .shadow}\n\n\n## Add in your Cloud Credentials to CI/CD variables\n\n### [AWS EKS](https://aws.amazon.com/eks/)\n\nOn the left sidebar, select **Settings > CI/CD. Expand Variables**.\n1. Set the variable **AWS_ACCESS_KEY_ID** to your AWS access key ID.\n2. Set the variable **AWS_SECRET_ACCESS_KEY** to your AWS secret access key.\n3. Set the variable **TF_VAR_agent_token** to the agent token displayed in the previous task.\n4. 
Set the variable **TF_VAR_kas_address** to the agent server address displayed in the previous task.\n\n![Add in CI/CD variables](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/cicd-variables.png){: .shadow}\n\n\n### [GCP GKE](https://cloud.google.com/kubernetes-engine)\n\n1. To authenticate GCP with GitLab, create a GCP service account with the following roles: **Compute Network Viewer, Kubernetes Engine Admin, Service Account User, and Service Account Admin**. Both User and Admin service accounts are necessary. The User role impersonates the default service account when creating the node pool. The Admin role creates a service account in the kube-system namespace.\n2. **Download the JSON file** with the service account key you created in the previous step.\n3. On your computer, encode the JSON file to base64 (replace /path/to/sa-key.json with the path to your key):\n\n```\nbase64 -i /path/to/sa-key.json | tr -d '\\n'\n```\n\n- Use the output of this command as the **BASE64_GOOGLE_CREDENTIALS** environment variable in the next step.\n\n4. On the left sidebar, select **Settings > CI/CD. Expand Variables**.\n5. Set the variable **BASE64_GOOGLE_CREDENTIALS** to the base64 encoded JSON file you just created.\n6. Set the variable **TF_VAR_gcp_project** to your GCP’s project name.\n7. Set the variable **TF_VAR_agent_token** to the agent token displayed in the previous task.\n8. Set the variable **TF_VAR_kas_address** to the agent server address displayed in the previous task.\n\n## Run GitLab CI to deploy your Kubernetes cluster!\n\n![Deploy Kubernetes cluster](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/pipeline-view.png){: .shadow}\n\nWhen successfully completed, view the cluster in the AWS/GCP console!\n\n![AWS EKS](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/aws-eks.png){: .shadow}\n\n### You are halfway done! 
👏 Keep it up!\n\n## Set up the Kubernetes Cluster Management Project\n\nCreate a project from the cluster management project template - [https://gitlab.com/projects/new#create_from_template](https://gitlab.com/projects/new#create_from_template)\n\n1. In GitLab, on the top bar, select **Main menu > Projects > View all projects**.\n2. On the right of the page, select **New project**.\n3. Select **Create from template**.\n4. From the list of templates, next to **GitLab Cluster Management**, select **Use template**.\n5. Enter the project details. Ensure this project is created in the same namespace as the gitlab-terraform-k8s project.\n6. Select **Create project**.\n7. Once the project is created on the left sidebar, select **Settings > CI/CD. Expand Variables**.\n8. Set the variable KUBE_CONTEXT to point to the GitLab Agent. For example, `noah-ing-demos/infrastructure/gitlab-terraform-eks:eks-agent`.\n\n![Set Kube Context](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/kube-config.png){: .shadow}\n\n\n- **Uncomment the applications you'd like to be installed** into your Kubernetes cluster in the **helmfile.yaml**. In this instance I chose ingress, cert-manager, prometheus, and Vault. \n\n![Uncomment Applications in helmfile](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/helmfile.png){: .shadow}\n\nThat will trigger your **CI/CD pipeline** and it should look like this.\n\n![Cluster Management CI/CD](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/cluster-management-cicd.png){: .shadow}\n\nOnce completed, **go to the AWS/GCP console** and check out all the deployed resources!\n\n![Deployed EKS applications](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/deployed-eks-applications.png){: .shadow}\n\n### Voila! 🎉\n\n## Enjoy your Kubernetes cluster completely defined in code! 
👏👏👏\n\nNow with these two repositories you can **manage a Kubernetes cluster entirely through code**:\n\n- For managing the Kubernetes cluster's infrastructure and configuring its resources you can make changes to the [gitlab-terraform-eks](https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks) repository you have setup. This project has a **Terraform CI/CD pipeline** that will allow you to **review, provision, configure, and manage your Kubernetes** infrastructure with ease.\n\n- For managing the desired state of the Kubernetes cluster, the [cluster-management](https://gitlab.com/gitlab-org/project-templates/cluster-management) repository has a **GitLab Agent** set up and will **deploy any Kubernetes objects defined in the helm files**.\n\n➡️ Bonus: If you'd like to deploy your own application to the Kubernetes cluster, then add to your **cluster-management** `helmfile` and see the GitLab Agent for Kubernetes roll it out with ease!\n\n\n## References\n- [Create a New GKE Cluster](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_gke_cluster.html)\n- [Create a New EKS Cluster](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_eks_cluster.html)\n- [Cluster Management Project](https://docs.gitlab.com/ee/user/clusters/management_project.html)\n\n\n## Related posts\n- [The ultimate guide to GitOps with GitLab](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/)\n- [GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform](https://about.gitlab.com/blog/gitops-with-gitlab-infrastructure-provisioning/)\n- [GitOps with GitLab: Connect with a Kubernetes cluster](https://about.gitlab.com/blog/gitops-with-gitlab-connecting-the-cluster/)\n",[978,1477,534,976,977,9],{"slug":7343,"featured":6,"template":686},"simple-kubernetes-management-with-gitlab","content:en-us:blog:simple-kubernetes-management-with-gitlab.yml","Simple Kubernetes Management With 
Gitlab","en-us/blog/simple-kubernetes-management-with-gitlab.yml","en-us/blog/simple-kubernetes-management-with-gitlab",{"_path":7349,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7350,"content":7354,"config":7359,"_id":7361,"_type":14,"title":7362,"_source":16,"_file":7363,"_stem":7364,"_extension":19},"/en-us/blog/six-more-months-ci-cd-github",{"title":7351,"description":1859,"ogTitle":7351,"ogDescription":1859,"noIndex":6,"ogImage":1861,"ogUrl":7352,"ogSiteName":670,"ogType":671,"canonicalUrls":7352,"schema":7353},"Extending free use of CI/CD for GitHub on GitLab.com","https://about.gitlab.com/blog/six-more-months-ci-cd-github","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Extending free use of CI/CD for GitHub on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2019-03-21\",\n      }",{"title":7351,"description":1859,"authors":7355,"heroImage":1861,"date":7356,"body":7357,"category":299,"tags":7358},[723],"2019-03-21","\n\nUPDATE: We've [extended again until Mar. 22, 2020](/blog/ci-cd-github-extended-again/)\n\n[CI/CD is one of the best parts of GitLab](/topics/ci-cd/). Our robust feature set and powerful Runner architecture have earned us some strong industry accolades. While we believe using GitLab end to end as a single application is the best experience, we also believe in [playing well with others](/handbook/product/gitlab-the-product/#plays-well-with-others) so that you can use the tools you want without vendor lock-in. 
In this spirit, we built [CI/CD for external repos](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/) and [CI/CD for GitHub](/solutions/github/) to allow you to host your code repositories on GitHub.com, GitHub Enterprise, BitBucket, or any Git server, while using GitLab CI/CD to build, test, and deploy your code.\n\nWe decided to extend the deadline for using CI/CD for external repos, including CI/CD for GitHub, until **Sep. 22, 2019**. You’ll now have an additional six months to enjoy CI/CD for external repos as a [Free or Bronze](/pricing/) user on GitLab.com. This feature will continue to be part of the [Premium tier](/pricing/premium/) for GitLab Self-managed.\n\n## Always free for open source\n\nThis extension applies to private repos hosted on GitLab.com. As part of our commitment to open source, public projects get [all the features of Gold for free](/pricing/). GitLab CI/CD for GitHub works by automatically mirroring your repos to GitLab.com. As such, if you have a public project on GitHub, it will also be public on GitLab so you can always take advantage of GitLab CI/CD for public projects.\n\n## Why we're extending the offer\n\nIn full [transparency](https://handbook.gitlab.com/handbook/values/#transparency), there are a few reasons we decided on an extension.\n\nThe first reason is that we didn’t want to ruin anyone’s day by shutting off functionality without fair warning. We don’t currently have all of the instrumentation in place to give us confidence that we can appropriately notify users, so we'll spend some time in the coming months to build this ability. We want to give ample opportunity for everyone currently enjoying the functionality on GitLab.com Free and Starter to make the choice to upgrade or migrate.\n\nThe second reason is the changing CI/CD market landscape. 
With recent developments – like the [consolidation of the CI/CD market](/blog/ci-cd-market-consolidation/) and the launch of the [Continuous Delivery Foundation](/blog/gitlab-joins-cd-foundation/) – we’ve seen greater interest in using GitLab CI/CD with other Git hosting options. Extending the timeline will allow more folks to test it out.\n\nFinally, we want to take this time to capture additional feedback on how you use this feature so we can improve it. If you are using GitLab CI/CD with any external Git repository, like GitHub.com, GitHub Enterprise, BitBucket, or even  your own vanilla Git server, we’d love to hear why you keep your code where you do, what you like about GitLab CI/CD, and what we can improve. We have several open channels for feedback so please leave a comment on this post, send us a message on Twitter with the hashtag #GitLabCICD, or log an issue with a bug fix or feature request on our [open issue tracker](https://gitlab.com/gitlab-org/gitlab-ce/issues). We hope you enjoy an extra six months of usage and hope to hear from you soon.\n",[109,9,267],{"slug":7360,"featured":6,"template":686},"six-more-months-ci-cd-github","content:en-us:blog:six-more-months-ci-cd-github.yml","Six More Months Ci Cd Github","en-us/blog/six-more-months-ci-cd-github.yml","en-us/blog/six-more-months-ci-cd-github",{"_path":7366,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7367,"content":7373,"config":7378,"_id":7380,"_type":14,"title":7381,"_source":16,"_file":7382,"_stem":7383,"_extension":19},"/en-us/blog/soc2-compliance",{"title":7368,"description":7369,"ogTitle":7368,"ogDescription":7369,"noIndex":6,"ogImage":7370,"ogUrl":7371,"ogSiteName":670,"ogType":671,"canonicalUrls":7371,"schema":7372},"How secure is GitLab?","Learn about GitLab's commitment to security and compliance, our security program maturity and 
accreditations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669646/Blog/Hero%20Images/blog-soc2-compliance.jpg","https://about.gitlab.com/blog/soc2-compliance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How secure is GitLab?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Saumya Upadhyaya\"},{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2020-06-24\",\n      }",{"title":7368,"description":7369,"authors":7374,"heroImage":7370,"date":7375,"body":7376,"category":875,"tags":7377},[2021,2022],"2020-06-24","\n\nWhen trying out a new vendor, you want to ensure the company meets your organization’s security policies. Often, we receive questionnaires from our customers to validate our security posture and to understand the maturity of GitLab’s security program.\n\nAs a rapidly growing company, we are in a fortunate position to have a lot of new customers sign up for our solution. We want our customers to have confidence in our offering from a security perspective, and we want to be able to provide that assurance in the most transparent and accessible way possible.\n\nTo demonstrate our commitment to security and compliance and to provide customers with an insight into our security maturity, we have pursued (and continue to pursue) a number of programs and accreditations. We’re excited to share that information with you.\n\n## SOC 2 Report\n\n[SOC 2](https://www.aicpa.org/interestareas/frc/assuranceadvisoryservices/serviceorganization-smanagement.html) is a security control report developed by the [American Institute of Certified Public Accountants](https://www.aicpa.org/) (AICPA) designed to give a holistic view of the design and effectiveness of a company's security program. 
A SOC 2 audit report provides an independent opinion about an organization's security and is becoming an industry standard for evaluating vendor security program maturity.\n\nThere are two types of SOC 2 reports:\n\n* **SOC 2 - Type 1** - which evaluates the design of controls\n* **SOC 2 - Type 2** - which evaluates the design and operating effectiveness of controls\n\n#### The SOC2 Report\n\nAs of 2021, GitLab has received a SOC 2 Type 2 attestation. Prior to receiving this attestation, we underwent a SOC 2 Type 1 audit in preparation for our Type 2. We detailed our experience undergoing the SOC 2 Type 1 audit, in this blog post, [The benefits of transparency in a compliance audit](/blog/benefits-of-transparency-in-compliance/).\n\n#### How can current (or prospective) customers get a copy of GitLab's most recent SOC 2 report?\n\nSince this report contains candid information about how our systems operate and proprietary audit specific information, we require certain confidentiality agreements be in place. This is built into our Terms of Service for current customers; for prospective customers we request you to complete an NDA with the help of your sales account leader.\n\nTo request the report and more details on our SOC 2 program please visit our [Security Certifications and Attestations handbook page](/handbook/security/security-assurance/security-compliance/certifications.html#requesting-a-copy-of-the-gitlab-soc2-type-2-report).\n\n## CSA Consensus Assessments Initiative Questionnaire (CAIQ)\n\nThe Cloud Security Alliance Consensus Assessments Initiative Questionnaire (CAIQ) from [CSA STAR](https://cloudsecurityalliance.org/) offers an industry-accepted way to document security controls in SaaS services - thereby helping customers to gauge the security posture of cloud service providers. The CAIQ Questionnaire captures most of the frequently asked security questions such as:\n\n* Do you use industry standards (i.e. 
OWASP Software Assurance Maturity Model, ISO 27034) to build in security for your Systems/Software Development Lifecycle (SDLC)?\n* Do you verify that all of your software suppliers adhere to industry standards for SDLC security?\n* Do you enforce data access permissions based on the rules of Authentication, Authorization and Accountability (AAA)?\n\n### Where can you get the GitLab CAIQ?\n\nUnlike the SOC 2 Type 1 Report, this questionnaire does not require a non disclosure agreement and is available for download by all users at [GitLab’s CAIQ page at the CSA website](https://cloudsecurityalliance.org/star/registry/gitlab/).\n\n## GitLab Control Framework (GCF)\n\nThe [GitLab Control Framework](/handbook/security/security-assurance/security-compliance/sec-controls.html) is a set of controls that establish security requirements for the organization and GitLab's operating environment. These controls provide assurance to customers that GitLab has a robust security program and that their data within GitLab is appropriately protected.\n\nThe GitLab Control Framework has prioritized security controls needed for PCI, Sarbanes–Oxley (SOX), and SOC 2 Security Criteria spanning across the following topics:\n\n* Asset management\n* Backup management\n* Business continuity\n* Change management\n* Configuration management\n* Data management\n* Identity and access management\n* Incident response\n* Network operations\n* People resources\n* Risk management\n* Security governance\n* Service lifecycle\n* Systems design documentation\n* Systems monitoring\n* Third party management\n* Training and awareness\n* Vulnerability management\n\nYou can read on about how we [chose our framework](/blog/choosing-a-compliance-framework/) and [how we implemented and adapted the Adobe Compliance Framework](/blog/creating-the-gitlab-controls-framework/).\n\n## PCI Compliance\n\nPayment Card Industry's Data Security Standard (PCI-DSS), defined by the [PCI Security Standards 
Council](https://www.pcisecuritystandards.org/), identifies the requirements for vendors that accept or facilitate credit card payments. Based on the volume of transactions by the vendor, the vendor is classified under one of [four levels](https://en.wikipedia.org/wiki/Payment_Card_Industry_Data_Security_Standard#Compliance_levels).\n\nGitLab is currently a Level 4 merchant for PCI which requires us to:\n\n* Complete an annual self-attestation questionnaire (SAQ)\n* Perform a quarterly scan of our PCI systems by an approved scanning vendor. GitLab uses [Tenable.io](https://www.tenable.com/products/tenable-io)\n\nGitLab's Attestation of Compliance (AoC) is available on request, via security@gitlab.com. Learn more about [GitLab PCI compliance](/handbook/security/security-assurance/security-compliance/certifications.html#current).\n\n## What’s next?\n\nSecurity and compliance are ongoing processes and GitLab is committed to continual iteration, maturation, and improvement of our information security program.\n\nOur immediate priorities include:\n\n* Continuous iteration and improvement of our security controls with updated mappings between the GitLab Controls Framework and industry standards like SOC 2, ISO 27001, PCI, FedRAMP, and others\n* The **SOC 2 Type 2 report**, which evaluates operational efficiency in addition to design controls, will commence in 2021\n* The **Standardized Information Gathering (SIG) questionnaire**, a standardized 3rd party risk assessment tool, which along with our CAIQ will provide readily accessible background and transparency into our security program.\n\nHave a question about any of our existing or ongoing compliance efforts? Or maybe feedback about implementing compliance programs in an iterative, highly-transparent environment? We’d love to hear from you. Leave us a comment!\n\n*Read more about our security compliance:*\n\n[Transparency can actually help a security audit. 
Here's how](/blog/benefits-of-transparency-in-compliance/)\n\n[Can technology outpace security compliance?](/blog/when-technology-outpaces-security-compliance/)\n\n[Choosing between an independent or aggregate compliance framework](/blog/choosing-a-compliance-framework/)\n\nCover image by [Josh Calabrese](https://unsplash.com/photos/qmnpqDwla_E) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,916,875],{"slug":7379,"featured":6,"template":686},"soc2-compliance","content:en-us:blog:soc2-compliance.yml","Soc2 Compliance","en-us/blog/soc2-compliance.yml","en-us/blog/soc2-compliance",{"_path":7385,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7386,"content":7392,"config":7396,"_id":7398,"_type":14,"title":7399,"_source":16,"_file":7400,"_stem":7401,"_extension":19},"/en-us/blog/soft-skills-are-the-key-to-your-devops-career-advancement",{"title":7387,"description":7388,"ogTitle":7387,"ogDescription":7388,"noIndex":6,"ogImage":7389,"ogUrl":7390,"ogSiteName":670,"ogType":671,"canonicalUrls":7390,"schema":7391},"Soft skills are the key to your DevOps career advancement","Learn the top soft skills you should invest time in to get a better salary and achieve your career goals.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668185/Blog/Hero%20Images/Chorus_case_study.png","https://about.gitlab.com/blog/soft-skills-are-the-key-to-your-devops-career-advancement","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Soft skills are the key to your DevOps career advancement\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2021-11-30\"\n      }",{"title":7387,"description":7388,"authors":7393,"heroImage":7389,"date":3815,"body":7394,"category":679,"tags":7395},[810],"\nIf you work in [DevOps](/topics/devops/) and you want to become a DevOps manager, communicate tech needs effectively with executives in the C-Suite, 
or boost your salary, it’s time to invest in your soft skills.\n\n\"Soft skills should be a huge focus for anyone looking to further their career,” says [PJ Metz](https://gitlab.com/PjMetz), education evangelist at GitLab. “You may be brilliant at the technical aspects of a job, but if you don’t have good interpersonal relationships, you'll get left behind.”\n\nNow, hold on! Just hear me out. I’m not talking about Kumbaya here. No holding hands and dancing under the full moon. Nope. I’m talking about non-technical, yet critical, skills that will enable you to engage with co-workers, especially executives, so their eyes don’t glaze over when you excitedly talk tech. \n\n“You need more than to know the tech really well. You need good leadership and communication skills,” says [Brendan O’Leary](https://gitlab.com/brendan), a staff developer evangelist, and product and engineering leader at GitLab. \n\nThis is particularly true if you want to be promoted to management. “Managers need to understand people and how they work together and what really motivates - and demotivates - people,” O’Leary says.\n\nMetz points to public speaking as another essential skill. “As virtual becomes a standard for meetings and conventions, being able to speak well means conveying your point well,” he says. “It also solidifies you as a leader who can help others understand complicated topics.”\n\nIf you’re going to help the business side of the company understand how DevOps will enable them to be more competitive and make more money, then you need soft skills. If you want that manager position, you need soft skills. If you even want to work better with your DevOps teammates, you need soft skills. Trust me. Knowing this stuff will help you get your tech and career goals accomplished.\n\n## The soft skills focus list\n\nSo what are we talking about when we say soft skills? 
Here are some examples:\n \n**Communication skills**, including how to talk to colleagues on the business side without using technical jargon or acronyms \n\n**Business understanding** – what does your company need?\n\n**Leadership**, including people management\n\n**Cool under pressure** – can you work calmly and effectively?\n\n**Problem solving**\n\n**Collaboration**: While DevOps is about collaborating to push out better software, you also need to be able to collaborate with people in other departments, like marketing, finance, sales and legal, to improve the business overall.\n\n## Where do you learn soft skills? \n\nCollege courses, journal articles and conference sessions are always a good place to start to learn soft skills. Here are some additional options:\n \nThere are helpful podcasts out there. For instance, check out the “Humans of DevOps” podcast series from the [DevOps Institute](https://www.devopsinstitute.com/resources/), which features episodes such as “Discussing Qualities of Great Leaders” and “Humans are Hard, Code is Easy.”\n\nYouTube has a lot of instructional videos, including “How to Speak With Confidence”, “Business Communication Essentials”, “Collaborative Problem Solving”, and “How to Speak Like an Executive.”\n\n[Coursera](https://www.coursera.org/) also is worth a look. Founded by Stanford University computer science professors, Coursera works with universities and other organizations to offer online courses, certifications and degrees in a variety of subjects.\n\nDon’t forget us right here at GitLab. For instance, [GitLab Learn](https://about.gitlab.com/learn/) offers classes such as “Effective Communication” and “Mastering Self-Motivation and Self-Advocacy.”\n\nLinkedIn also offers classes on business communication.\n\n## The payoff for improving soft skills\n\nYah, we get it. 
When you think about [ways to up your salary](/blog/four-tips-to-increase-your-devops-salary/) or focus on [continuous education](/blog/best-advice-for-your-devops-career-keep-on-learning/), you think about so-called hard skills, like mastering new programming languages and learning more about security and automation. You forget about, simply ignore or choose not to “waste” time on the soft skills. Then you wonder why you’re not moving up the career ladder, leading a team or making a presentation to the business execs. \n\nThe [2021 Enterprise DevOps Skills Report](https://learn.gitlab.com/devops-institute/2021-doi-devops-upskilling-report?utm_medium=email&utm_source=marketo&utm_campaign=devopsgtm&utm_content=doi-devops-upskilling-report) showed that people skills now are considered a “must have,” with 69 percent of survey respondents ranking human skills as the second-most valuable. Similarly, 68 percent indicated that a DevOps leader must be skilled in empowering\nand developing others.\n\nThe bottom line is if you invest in your soft skills, including learning to speak the language of business, then you’re more likely to achieve your career goals. 
\n\n",[813,9,749],{"slug":7397,"featured":6,"template":686},"soft-skills-are-the-key-to-your-devops-career-advancement","content:en-us:blog:soft-skills-are-the-key-to-your-devops-career-advancement.yml","Soft Skills Are The Key To Your Devops Career Advancement","en-us/blog/soft-skills-are-the-key-to-your-devops-career-advancement.yml","en-us/blog/soft-skills-are-the-key-to-your-devops-career-advancement",{"_path":7403,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7404,"content":7410,"config":7416,"_id":7418,"_type":14,"title":7419,"_source":16,"_file":7420,"_stem":7421,"_extension":19},"/en-us/blog/software-developer-changing-role",{"title":7405,"description":7406,"ogTitle":7405,"ogDescription":7406,"noIndex":6,"ogImage":7407,"ogUrl":7408,"ogSiteName":670,"ogType":671,"canonicalUrls":7408,"schema":7409},"Software developer roles: How responsibilities are evolving","From your job title to where you sit in the organization and what your priorities are, every single aspect of the software developer role is about to change. More than a dozen DevOps practitioners and analysts shared their views of the future. Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664054/Blog/Hero%20Images/future-of-software-developer-role-changing.png","https://about.gitlab.com/blog/software-developer-changing-role","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The software developer role and responsibilities are changing. Here's what to expect\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-10-20\",\n      }",{"title":7411,"description":7406,"authors":7412,"heroImage":7407,"date":7413,"body":7414,"category":679,"tags":7415},"The software developer role and responsibilities are changing. 
Here's what to expect",[851],"2020-10-20","\n_This is the first in our four-part series on the future of software development. Part two puts the spotlight on [\"future\" technologies that may impact how software is developed](/blog/how-tomorrows-tech-affects-sw-dev/). Part three looks at [the role artificial intelligence (AI) will play in software development](/blog/ai-in-software-development/), and part four tackles [how to future-proof your developer career](/blog/future-proof-your-developer-career/)._\n\nWhat is the role of a developer? Rapid technology advancements, sweeping changes in business priorities, and a seemingly insatiable demand for software have collided in ways that will likely mean substantive changes to the software developer's role over the next few years.\n\nNothing is static, of course, and in our [2020 Global DevSecOps Survey](/developer-survey/) we found that developers are already reporting new responsibilities – such as tasks normally associated with ops and security – while at the same time releasing software much faster and embracing new technologies including Kubernetes, [microservices](/topics/microservices/), and even AI.\n\nBut over the next few years even some fundamental things – like the meaning of \"developer\" or the role in the organization – are poised to change. Here's what more than a dozen DevOps practitioners and analysts see coming.\n\n_Our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) is out now! 
Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\n## What's in a name?\n\nThanks to the explosion of demand for [enterprise software applications](/enterprise/), a wave of \"development democratization\" is about to hit professional developers, and that is going to mean a fundamental shift in what it means to *be* a developer, according to [a report from Deloitte Insights](https://www2.deloitte.com/us/en/insights/focus/signals-for-strategists/digital-transformation-moving-beyond-it.html). [Low-code and no-code development tools](/blog/low-code-no-code/) allow virtually anyone with a good idea to create some level of application, meaning a potentially unlimited number of citizen developers will be capable of doing at least basic application development. Software development won't be restricted to just pro developers anymore and this shift is well underway in the enterprise market because there simply [aren't enough coders to meet the demand](https://www.cnbc.com/2019/06/18/there-are-70000-open-tech-jobs-here-is-how-firms-are-hiring-for-them.html). Writing in a ComputerWeekly article, Gartner research director [Paul Vincent](https://www.linkedin.com/in/paulvincent/?originalSubdomain=uk) goes one step further and suggests there might be times [even professional developers choose to use a low-code tool](https://www.computerweekly.com/feature/Gartner-What-to-consider-before-adopting-low-code-development).\n\n## A new org chart\n\n_Goodbye IT department and hello line of business._ As the software stakes get higher and product managers continue to set software development goals, it makes sense that developers will end up embedded on business teams rather than technology teams. 
A report from Forrester Research looking at trends for 2020 refers to this shift as [the \"dev diaspora\"](https://www.ciodive.com/news/forresters-predictions-software-development/566257/) and predicts that after some bumps in the road it will improve productivity and release speed. The basic message: Software is critical to business success so should be located *with* the business rather than in the IT department.\n\n## New colleagues and culture\n\nWith developers moving \"into the business\" and a growing emphasis on citizen devs, it's clear not all teams will be filled with hardcore coders. Some certainly will be working alongside citizen developers. But others may also find \"team members\" in unexpected places, like their IDEs. Although artificial intelligence is still nascent in most enterprise development teams, some industry analysts are bullish that AI can bring speed, advice, structure, and perhaps even coding to the table in three to five years from now.\n\nHowever, no matter how quickly AI ends up as part of the pro developer work experience it's clear dev culture is going to have to change if \"development\" is no longer such a specialized skill.\n\n## Yet another shift left\n\nSecurity, test, and automation may have already shifted left so now get ready for customers to shift left, warns [Kenny Johnston](/company/team/#kencjohnston), senior director of product management, Ops at GitLab. \"If you want to have a complete view of [DevOps](/topics/devops/) you have to understand how your application is actually interacting with customers and you have to have that understanding from the early stages of development,\" says Kenny. Traditionally, developers have been largely absent from customer interactions, but that's going to change moving forward. GitLab's director of product management, CI/CD [Jason Yavorska](/company/team/#jyavorska) says one way to make customers real is for developers to take on a design mindset. 
\"You want to be thinking about the customer and building features together with the customer while both of you are connecting as humans.\"\n\nIf that's a step too far in the near term, Kenny has a medium-term strategy: Focus on what happens when something goes wrong. If developers want to write code that satisfies increasingly demanding customers, Kenny suggests starting with a simple question: \"What's the customer doing when the application fails?\"\n\n## Stop monitoring. Start observing\n\nA new focus on customers and the accompanying tightened feedback loops means more data than ever is likely to be heading to developers. That's not sustainable, says GitLab's senior developer evangelist [Brendan O'Leary](/company/team/#brendan). Constant alerting doesn't tell a developer much, which is why he sees teams moving away from monitoring and toward observability. \"You want to be able to observe how a complex system operates rather than monitoring it,\" he says. \"It's like the difference between Jane Goodall and a gorilla.\" Dr. Goodall can decode what she sees and put it in context, a practice that is tremendously valuable for devs (and ops pros for that matter) to establish. \"If we want to make technology easier to process we need to be able to get this information to developers in a meaningful way,\" he explains. \"We want to be able to show developers exactly what people normally do or where users get stuck in a particular area... all of that is better information than the data they have right now. We have to find a way to surface the information that matters.\"\n\n## Double down on open source\n\nMore than 60% of those who took our DevSecOps Survey are active participants in open source projects and DevOps practitioners expect that will continue to increase. 
At a time when the world is changing quickly, open source can be a way reticent developers can push themselves to learn about other parts of the business, says [Rafael Garcia](https://www.linkedin.com/in/jrafaelgarcia/), director of digital services at insurance provider Aflac. \"If you're actively engaging in open source that's one way of looking at cross-company projects,\" he says. \"Maybe there's a passion project that's outside of your core business roles or functions. It's a way of doing things that are outside your comfort zone.\"\n\nOpen source also provides a window into the way other companies operate, something developers need a better sense of in a world undergoing big changes, says [Jose Manrique Lopez de la Fuente](https://www.linkedin.com/in/jose-manrique-lopez-de-la-fuente-b869884/), CEO at Bitergia (and a [GitLab Hero](/community/heroes/)). Developers need to understand and cultivate the open source culture, he says. \"You don't have to start from scratch,\" says Manrique. And looking at how other companies manage their open source culture will reinforce that belief. \"It's not only about the skills or the components but also about the relationships with the maintainers of the components. You benefit if you can create a wider community.\"\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! 
Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n",[9,681,726],{"slug":7417,"featured":6,"template":686},"software-developer-changing-role","content:en-us:blog:software-developer-changing-role.yml","Software Developer Changing Role","en-us/blog/software-developer-changing-role.yml","en-us/blog/software-developer-changing-role",{"_path":7423,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7424,"content":7430,"config":7435,"_id":7437,"_type":14,"title":7438,"_source":16,"_file":7439,"_stem":7440,"_extension":19},"/en-us/blog/software-test-at-gitlab",{"title":7425,"description":7426,"ogTitle":7425,"ogDescription":7426,"noIndex":6,"ogImage":7427,"ogUrl":7428,"ogSiteName":670,"ogType":671,"canonicalUrls":7428,"schema":7429},"An inside look at software testing at GitLab","Director of quality engineering Mek Stittri talks test technology and the future of automation at GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680800/Blog/Hero%20Images/softwaretestlaunch.jpg","https://about.gitlab.com/blog/software-test-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"An inside look at software testing at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-08-30\",\n      }",{"title":7425,"description":7426,"authors":7431,"heroImage":7427,"date":7432,"body":7433,"category":679,"tags":7434},[851],"2019-08-30","\n\n_In our [just-released survey of over 4,000 developers, security\nprofessionals, and operations team members](/developer-survey/), there was one thing everyone agreed on: 50% of each group\nsaid software testing is the biggest reason why development is delayed. 
Testers have long\nbeen the underdogs in the SDLC and that viewpoint is apparently very slow to change.\nTo understand what’s really going on, and how things work at GitLab, we\nasked [Mek Stittri](/company/team/#mekdev), director of quality engineering, to share his\nperspective on what’s working with test today and what’s in need of improvement._\n\n## Why is test a continued DevOps problem?\n\nIt’s a two-part answer, Mek says. First, there are simply not enough tests run and second, the tests that are used are often flaky (meaning their results aren’t necessarily trustworthy).\n\nTackling the issue of not running enough tests, Mek says it’s an area GitLab is addressing. “At GitLab, I think we are better than other companies where developers write unit tests and integration tests every time a change goes in,” he says. “That is great, but that testing is at a lower level, and it doesn't really map to a business use case.” To write better tests a team needs test requirements, but there can be so many different sets of stakeholders that it can be tough to get their input about *test* requirements and not just feature requirements. “We are improving it here at GitLab where our VP of Product [Scott Williamson](https://gitlab.com/sfwgitlab) is doing a great job. We have a section for test requirements right now (in the issue and merge request templates). It's now a blank and free form for people to fill in, but it should be highlighted going forward as a required section taking input from product discovery and validation as a deliverable.”\n\nThe bottom line: the stakeholders who are delivering the code need to understand the end goal better. “Unit tests test code at a smaller scale, and that’s great, but it doesn’t really verify the functionality works end to end as a whole. 
We need more coverage and more understanding of what needs to be tested.”\n\n![The Apollo 11 launch framework](https://about.gitlab.com/images/blogimages/apollo11framework.png){: .shadow.small.center}\nApollo 11 is held up by a framework and software is no different.\n{: .note.text-center}\n\nMek likens this process to Apollo 11. Everyone is excited about the rocket (the software features, in other words) but no one pays attention to the red scaffolding on the right that’s actually holding the rocket up. “That’s the side that nobody looks at but it’s a lot of work,” he says. “It’s taller than the rocket. We need to build that platform to have adequate testing (functional, performance, etc).” The ideal situation to get a company there? Start building the test framework and add test coverage at the exact same time the product is being built. “You assemble it together, run it, it’s passing and we go for launch and it’s shipped. We’re not there yet. And I can assure you a lot of companies out there aren’t there yet either.”\n\n## About those flaky tests…\n\n“There are a lot of test automation engineers and test developers out there, but not all of them know how to write and design a good test,” Mek explains. Automated tests needs to function like a flow of self-retrying dominoes where if one step is not completed it needs to keep retrying to reach the next step. Tests need to mimic what a manual tester would do, he says. No manual tester is going to click on a button and then wait 10 minutes. The tester will click again, or try other strategies. “At GitLab [we put emphasis on test framework reliability](/handbook/engineering/quality/#test-framework-reliability-and-efficiency) and we treat each user workflow step like a piece of retrying dominoes. 
We need to make sure all the dominoes fall over so the workflow is completed,” Mek says.\n\n>We need more coverage and more understanding of what needs to be tested.\n\nSo companies need to think through how the tests work, but also test the right things. If that happens, quality can be everyone’s responsibility in the end, Mek says. “We want developers to contribute to the end-to-end test so you want to make a test framework that is easy to use and easy to read. I think this all factors in.” And Mek points out it really is in everyone’s best interests to think about quality first. “Let's make the process better so we work smarter, right? We achieve more without having to work weekends or get pinged during your family dinner. Nobody wants that.”\n\n## Test automation and machine learning\n\nTest automation is a cornerstone of successful [DevOps](/topics/devops/) but it remains difficult for many companies to achieve. Mek’s take: “We need to design the product such that the test automation framework can integrate into it well,” he says flatly. That requires good collaboration with development teams due to frontend UI locators and backend APIs that are the interfaces to enable better and stable test automation. “Go back to Apollo 11,” Mek says. “It's like the connections along the rocket's fuselage. I need to integrate with this to make sure things are working fine. The probes and sensors need to be there. So if those aren't there, then your test automation engineers need to code around these obstacles. It's not working smart.” In other words, the test automation framework should not take the longer route when executing user interactions to the application because this can be the source of unstable and in-efficient tests.\n\nOne step that can help companies – including GitLab – get there is [machine learning](https://medium.com/machine-learning-for-humans/why-machine-learning-matters-6164faf1df12). 
“We are having discussions here at GitLab about where we want a bot,” Mek says. “I think machine learning will come and help, but the input and output needs to be clearly defined so you have a clear implementation direction, TensorFlow, Linear Regression, or whatever techniques. You can write a bot that just lives in the product, meaning it looks at all the UI locators (dedicated to test automation) on a page and randomly clicks one of those links.” This GitLab bot of the future will work 24/7, clicking, clicking, clicking on the page until it errors out or runs into a 404, Mek says. The goal is to create a bot that is like a “menacing QA engineer” that can be programmed to keep banging on the problematic areas until everything is solved. To get there will require lots of data – machine learning literally needs to learn from data and experience – and although there are a handful of companies experimenting with this now, this is all still very early stage.\n\n## Where we’re headed with testing\n\nMek and his team hope to increase both quality and productivity this year which may be a bit of a balancing act, since more “quality” equals more testing which can result in a longer development cycle and perhaps reduced productivity (this is why we say test automation engineers are often unappreciated!). “My department is working this quarter to have a full suite of automated tests for our enterprise features. We want to have a big checkbox for the enterprise features every time we deploy. We need this because it is mapping to the business use case.” But Mek and team need to do all of that while shortening the test runtime for developers. “You want more test coverage but we need to keep the runtime low because we can’t have developers and release managers wait two hours.”\n\nThe plan is to add more runners, optimize them, de-duplicate some tests and make sure the process is as streamlined as it can be. 
“Right now it takes about an hour or so, but I would love to have it down to 30 minutes where we certify that this merge request going in checks all the boxes and all the enterprise features are not broken. We need to set ourselves an aggressive goal and I would say 30 minutes is a good first step.”\n\nCover image by [Kurt Cotoaga](https://unsplash.com/@kydroon) on [Unsplash](https://unsplash.com)\n{: .note}\n",[728,1158,9,683],{"slug":7436,"featured":6,"template":686},"software-test-at-gitlab","content:en-us:blog:software-test-at-gitlab.yml","Software Test At Gitlab","en-us/blog/software-test-at-gitlab.yml","en-us/blog/software-test-at-gitlab",{"_path":7442,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7443,"content":7449,"config":7454,"_id":7456,"_type":14,"title":7457,"_source":16,"_file":7458,"_stem":7459,"_extension":19},"/en-us/blog/southwest-looking-to-help-developers-take-flight",{"title":7444,"description":7445,"ogTitle":7444,"ogDescription":7445,"noIndex":6,"ogImage":7446,"ogUrl":7447,"ogSiteName":670,"ogType":671,"canonicalUrls":7447,"schema":7448},"Southwest looking to help developers take flight","Learn how the airline's DevOps teams are dramatically increasing their ability to detect and resolve problems with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665272/Blog/Hero%20Images/AdobeStock_380312133.jpg","https://about.gitlab.com/blog/southwest-looking-to-help-developers-take-flight","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Southwest looking to help developers take flight\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2024-01-30\",\n      }",{"title":7444,"description":7445,"authors":7450,"heroImage":7446,"date":7451,"body":7452,"category":1827,"tags":7453},[810],"2024-01-30","Southwest Airlines Co. 
is working to make developers’ jobs easier.\n\nIT leaders at the world's largest low-cost carrier are moving to eliminate time-consuming and repetitive tasks from developers’ workflows, freeing their time and increasing their ability to focus on bigger projects.\n\n“The way we do that is by getting things out of their way,” said Jim Dayton, vice president and CISO at Southwest Airlines. “I am a firm believer that people go into software development because they love the creativity of it. They love the ability to solve problems. What we have to do is get out of their way and get the things that are blocking them out of their way.”\n\nPart of how Dayton is making that happen is by using GitLab’s platform.\n\nDayton talked about Southwest’s efforts to take care of their developers, and promote the work they’re doing, during an on-stage interview at the Dallas stop of [GitLab’s DevSecOps World Tour](https://about.gitlab.com/events/devsecops-world-tour/). He also spent part of his conversation with Reshmi Krishna, director of Enterprise Solutions Architecture at GitLab, discussing what benefits he hopes artificial intelligence capabilities will be able to offer his teams.\n\nThe Southwest exec, who said they’re moving toward a DevOps approach to application development, added that they’re providing developers with more self-service capabilities and knowledge management processes. “We want developers to be able to quickly look up a problem, look up a solution, and reduce context switching,” he said. “We need to be able to look at what we are asking them to do and what's preventing them from being able to be productive.”\n\nDayton noted that Southwest, which established a relationship with GitLab in 2019, is focused on creating consistency for its software development processes. In part, that means moving code into a shared GitLab repository. 
By knowing where all of their code resides, teams will be able to more easily evaluate metrics, and begin to look at creating efficiencies by reusing code. \n\n“We’re also in the process of getting our enterprise pipelines finalized and we’re ready to start migrating teams onto them,” said Dayton. “We're collaborating heavily with a lot of different application development teams to understand what they need in the pipelines that we're building and we’re getting ready to start migrating teams onto them. I think we'll be getting pretty close by the end of the year.”\n\n### The promise of AI\n\nUsing artificial intelligence is one of the ways to enable developers to focus on bigger, more innovative tasks, Dayton explained.\n\nGenerative AI, whether in the form of vulnerability explainers, code suggestions, or code completion, has the ability to dramatically affect workflows across the entire software development lifecycle. Leveraging AI tools built into a platform can increase security and decrease time spent on code reviews and application development.\n\nDayton is looking forward to being able to use AI features to speed and ease development and deployment.\n\n“We want to get the mundane and the bureaucratic out of their way as much as possible,” Dayton said, adding that while there’s a lot of hype around AI, there’s also a lot of promise. “Using AI could do that. I think a great example will be when it can provide a solution to a vulnerability that was just identified or when it can tell us what a piece of code is doing. What is it integrating with? What data is it accessing and why? Tell me in plain English, for example, that this particular set of coding has been responsible for 20% of the incidents in this application over the past year. That’s where I think AI can help.”\n\nDayton noted that he doesn’t believe AI will replace developers. Instead, it should make their jobs easier. 
Another way AI can help is by connecting developers in a time when many are working remotely post-COVID.\n\n“One of the cool things that's in [GitLab’s] roadmap is Suggested Reviewers,” he said. “Getting help with code reviews used to involve yelling across the room or over a cube wall, ‘Hey, can someone look at my code?’ That’s not so easy now. AI can suggest someone who's actually worked in that code before or who has resolved incidents in that code and does that sort of thing. How much value is that going to add to the review process? I think the more automation we can put in, the less manual steps or wait states there will be.”\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/UnUfp7pKnEQ?si=qcX2Qm3zpgQOV4xy\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n*Southwest Airlines is a nearly $24 billion company based in Dallas, Texas. With 72,000 employees, it flies to 120 destinations, making 4,000 flights per day.  
Southwest flies more domestic passengers than any other airline.\nRead more GitLab customer stories on our [customers page](https://about.gitlab.com/customers/).*\n",[9,2534,1181,793],{"slug":7455,"featured":6,"template":686},"southwest-looking-to-help-developers-take-flight","content:en-us:blog:southwest-looking-to-help-developers-take-flight.yml","Southwest Looking To Help Developers Take Flight","en-us/blog/southwest-looking-to-help-developers-take-flight.yml","en-us/blog/southwest-looking-to-help-developers-take-flight",{"_path":7461,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7462,"content":7467,"config":7472,"_id":7474,"_type":14,"title":7475,"_source":16,"_file":7476,"_stem":7477,"_extension":19},"/en-us/blog/speed-secure-software-delivery-devsecops",{"title":7463,"description":7464,"ogTitle":7463,"ogDescription":7464,"noIndex":6,"ogImage":1193,"ogUrl":7465,"ogSiteName":670,"ogType":671,"canonicalUrls":7465,"schema":7466},"Speed up secure software delivery with DevSecOps","It’s time to shift left: Embed security into your DevOps workflow to increase speed, quality, and efficiency in the SDLC.","https://about.gitlab.com/blog/speed-secure-software-delivery-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up secure software delivery with DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-04-30\",\n      }",{"title":7463,"description":7464,"authors":7468,"heroImage":1193,"date":7469,"body":7470,"category":679,"tags":7471},[1016],"2019-04-30","\n\nDevOps is a revolutionary step forward in efficient software delivery, but teams\noften face painful delays when releases are put through security testing.\nSecurity is critical for every digital entity, but often adds tension to a\nprocess that is already under pressure for speed and cost efficiency. 
For many,\nsoftware delivery resembles an assembly-line style of work where employees have\nto constantly stop and start their work on different projects, breaking\ntheir mental flow and straining relationships between teams.\n\nTo illustrate, let’s trade software for [Ford’s Model Ts](https://www.history.com/this-day-in-history/fords-assembly-line-starts-rolling)\nfor a minute. Software development closely resembles development of those first cars\nmanufactured by Ford: Each worker makes a contribution and hands off to the\nnext, and then the security pros take it for a test drive (or look for\nvulnerabilities). But if the car doesn’t function properly, it’s sent back to\nthe beginning of the line to the developers who have already begun working on\na different vehicle.\n\nBack to software. How can teams solve this back-and-forth without foregoing\nquality? They must embed security into the development workflow.\n\n## Integrate and automate end-to-end security\n\nWhen security is embedded into the developer workflow, developers can respond\nto vulnerability alerts _while_ they’re writing code. Within the developer's\npipeline report in GitLab, individual vulnerabilities are presented to the developer for\nreview. Alerts could include unsafe code, dangerous attributes, and other\nvulnerabilities that could put your application at risk. The developer is able\nto look into each alert, determine whether it needs to be addressed or can be\ndismissed, and then address each alert while moving through the\ndevelopment process. In the [Security Group Dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/), the security analyst is able to see which alerts the developer was unable to resolve as well as what\nwas dismissed, making sure no vulnerabilities slip through the cracks.\n\n### Gain speed and efficiency with DevSecOps\n\nEmbedded security checks allow developers to pass off a streamlined workflow to\ntheir security peers. 
Security then focuses on the most important risks and\nthreats with the typical mountain of checks reduced to a much shorter list.\nShortened test times lead to much faster releases: Wag! (a dog-walking app)\n[brought their release time down from 40 minutes to just six.](/blog/wag-labs-blog-post/)\n\nStandard release processes place an unnecessary burden on your teams when a\nlimited number of engineers can work on them and project handoff actually\nimpedes completion. The ability to work concurrently within the same environment\nrepresents much more than a shift left: It redefines the entire DevOps\nlifecycle, enabling greater efficiency and collaboration on a single source\nof truth.\n\n### How it works\n\n[Static application security testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)\nbrings vulnerabilities to developers so they can review gaps in their code\n_within_ their own working environment before passing the project off to\nsecurity. This integration mitigates the friction that often stands between dev\nand security, allowing security to graduate from roadblock status to critical\nworkflow component. The collaborative nature of [SAST within tools like GitLab](https://docs.gitlab.com/ee/user/application_security/sast/)\nallows different teams to access the project at any time, eliminating any\ncumbersome linear processes and breaking down silos within the larger\norganization.\n\n## Accelerate delivery and build productivity by testing closer to remediation\n\nShifting left might ring alarm bells for some, but don’t worry – developers\nwon’t be solving _every_ security problem. The idea is to alert your dev team to\nthe code fixes that would be easiest for them to solve, rather than making the\nsecurity team do the digging. 
This switch will streamline the overall workflow,\nallowing the security team to focus on more critical risks and reducing handoff\nbetween security and dev.\n\n[DevSecOps](/topics/devsecops/) integrates security into your CI/CD processes, allowing your teams to\nwork quickly, collaborate efficiently, and produce secure and\nquality software at every release.\n\nAre you ready to build security into your DevOps practices? [Just commit.](https://about.gitlab.com/solutions/security-compliance/)\n{: .alert .alert-gitlab-purple .text-center}\n",[109,9,875],{"slug":7473,"featured":6,"template":686},"speed-secure-software-delivery-devsecops","content:en-us:blog:speed-secure-software-delivery-devsecops.yml","Speed Secure Software Delivery Devsecops","en-us/blog/speed-secure-software-delivery-devsecops.yml","en-us/blog/speed-secure-software-delivery-devsecops",{"_path":7479,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7480,"content":7486,"config":7491,"_id":7493,"_type":14,"title":7494,"_source":16,"_file":7495,"_stem":7496,"_extension":19},"/en-us/blog/speed-security-devops",{"title":7481,"description":7482,"ogTitle":7481,"ogDescription":7482,"noIndex":6,"ogImage":7483,"ogUrl":7484,"ogSiteName":670,"ogType":671,"canonicalUrls":7484,"schema":7485},"How to ensure security at the speed of DevOps","Read here on how to speed up your secure DevOps for faster delivery on your safe and secure applications.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678356/Blog/Hero%20Images/balance-speed-security-devops.jpg","https://about.gitlab.com/blog/speed-security-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to ensure security at the speed of DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-10-31\",\n      
}",{"title":7481,"description":7482,"authors":7487,"heroImage":7483,"date":7488,"body":7489,"category":679,"tags":7490},[1016],"2019-10-31","\nChoosing between speed and security leaves some development teams walking a fine\nline between order and chaos. Even in [DevOps](/topics/devops/), if your security practices are\nstill largely manual, teams often choose to release apps before they’re fully\nsecured, rather than waiting for the security team to address critical\nvulnerabilities.\n\nBut what if I told you that you don’t need to choose? Pull your security team,\ntests and practices to the beginning of the SDLC, and embed them throughout to\nreduce time to launch – and launch a secure product.\n\n## Six ways to bring security up to speed\n\n### 1. Make small, frequent changes\nProduce code in small chunks or units, and then run automated tests on those\nunits as they’re committed, so the developers can remediate any\nvulnerabilities on the spot – rather than waiting for feedback days, weeks, or\neven months later. Running regular tests saves time down the road, when the completed\napp is tested before launch.\n\n### 2. Educate developers _and_ security teams\nAdopt or create an educational program that teaches developers to recognize\ncommon vulnerabilities and remediate on their own. Security professionals should\nalso be educated on application development and emerging technology,\nso they can understand developers’ work and ensure their organization isn’t\noverlooking any major vulnerabilities.\n\n### 3. Fail fast, fix fast\nFailing fast is a critical component of the DevOps mindset – and should be\napplied to developers’ security practices as well. If the automated scans\nreveal vulnerabilities, developers should be encouraged to take\nremediation into their own hands, both as a form of self-education, and to keep\nthe SDLC moving quickly.\n\n### 4. 
Prioritize risks\nRisks will take different levels of priority within a single app, or across all\nof an organization’s apps. DevOps and security teams should work together to\nestablish security guidelines that allow teams to prioritize which risks to\naddress immediately, and which may not need remediation in the short term.\n[Joe Coletta of IBM brings up an important distinction](https://securityintelligence.com/how-to-balance-speed-and-security-in-your-application-security-program/):\nFlaws should be assessed not only by level of severity, but also by likelihood\nof exploitation by an attacker.\n\n### 5. Automate as much as possible\nManual security processes cannot keep up – point blank. There are too many new\ntechnologies, deployments, and access requests for security teams to manually\nhandle everything. Tests should be pre-written and policies pre-defined so\nthat they’re addressed automatically within the development pipeline. Automation\nalso allows developers to focus on business demands – getting the app out\nquickly – while reducing the chance for human error.\n\n### 6. More is better\nTesting more frequently is always better, if it can be done efficiently. In rapid\ndevelopment, teams push small changes continuously, which also means they’re\nable to find vulnerabilities more easily, and push small fixes continuously.\n[As Forrester Research Director Amy DeMartine has stated](https://techbeacon.com/app-dev-testing/has-continuous-security-arrived-rise-rapid-development),\nany changes that developers make [using these methods] will only affect their\nsmall piece of code, without any ramifications on the rest. Ultimately, this\nincreases quality.\n\n## Like always, communication is key\n\nAbove all, your security and DevOps teams **must** be on the same page: A cross-team\nsecurity mindset requires a strong commitment to communication and transparency. 
Leaders should encourage\nmembers of both teams take initiative to understand the other team’s goals and\nintent, and why these goals are important to both the business and customer. Teams at\nevery business should focus on building a security-first mindset, as today’s\nexpanding attack surfaces provide opportunity for exploitation at every level.\nLastly, make it easy (or as easy as it can be). Integrated tools, or a single\ntool for the entire lifecycle (such as GitLab) will bring transparency to all\nsides of the operation and allow for seamless interactions, change logging,\nand efficiency.\n\nCover image by [Christian Englmeier](https://unsplash.com/@christianem) on [Unsplash](https://unsplash.com/photos/J7EUjSlNQtg)\n{: .note}\n",[875,9,2243],{"slug":7492,"featured":6,"template":686},"speed-security-devops","content:en-us:blog:speed-security-devops.yml","Speed Security Devops","en-us/blog/speed-security-devops.yml","en-us/blog/speed-security-devops",{"_path":7498,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7499,"content":7504,"config":7510,"_id":7512,"_type":14,"title":7513,"_source":16,"_file":7514,"_stem":7515,"_extension":19},"/en-us/blog/sre-shadow",{"title":7500,"description":7501,"ogTitle":7500,"ogDescription":7501,"noIndex":6,"ogImage":6145,"ogUrl":7502,"ogSiteName":670,"ogType":671,"canonicalUrls":7502,"schema":7503},"My week shadowing a GitLab Site Reliability Engineer","On-call through the eyes of a software engineer. 
Read Tristan's week shadowing a GitLab Site Reliability Engineer","https://about.gitlab.com/blog/sre-shadow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"My week shadowing a GitLab Site Reliability Engineer\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tristan Read\"}],\n        \"datePublished\": \"2019-12-16\",\n      }",{"title":7500,"description":7501,"authors":7505,"heroImage":6145,"date":7507,"body":7508,"category":1359,"tags":7509},[7506],"Tristan Read","2019-12-16","\n{::options parse_block_html=\"true\" /}\n\n## Site Reliability Engineer (SRE) - Shadow\n\nFirst-off I'll introduce myself - I'm [@tristan.read](https://gitlab.com/tristan.read), a Frontend Engineer in GitLab's [Monitor::Health Group](https://about.gitlab.com/handbook/engineering/development/ops/monitor/respond/). Last week I had the privilege of shadowing one of GitLab's on-call SREs. The purpose was to observe day-to-day incident response activities and gain some real-life experience with the job. We'd like all of our engineers to better understand and develop empathy for users of Monitor::Health [features](https://about.gitlab.com/handbook/product/categories/#health-group).\n\nThe idea for the week is to follow everything the SRE does. This means I attended handover meetings, watched the same alert channels, and joined incident response calls if and when they occurred.\n\n## Incidents\n\nTwo incidents occurred during the week while I was shadowing.\n\n### 1. Crypto miner\n\nOn Wednesday a jump in [GitLab Runner](https://docs.gitlab.com/runner/) usage was detected on GitLab.com - this was caused by a user attempting to use runner minutes to mine crypto coins. 
This was dealt with by using an in-house abuse mitigation tool, which stops the runner jobs and removes the associated project and account.\n\nHad this event not been spotted it would have been caught by an automated tool, but in this case the SRE spotted it first. An incident issue was raised for this, but it remains private.\n\n### 2. Performance degredation on Canary and Main applications\n\nThis incident was triggered by slowdowns and increased error rates appearing on GitLab.com's canary and main web applications. Several Application Performance Index (Apdex) Service Level Objectives (SLO) were violated.\n\nPublic incident issue: [https://gitlab.com/gitlab-com/gl-infra/production/issues/1442](https://gitlab.com/gitlab-com/gl-infra/production/issues/1442)\n\n## Key Takeaways\n\nThese are some things that I learned during the week on-call.\n\n### 1. Alerting is most useful when it detects a change from the norm\n\nAlerts can be split into several types:\n1. Alerts based on a specific threshold value, such as \"10 5xx errors occurred per second\".\n2. Alerts where the threshold is a percentage value, such as \"5xx error rate at 10% of total requests at this time\".\n3. Alerts that are based on a historical average, such as \"5xx error rate is in the 90th percentile\".\n\nIn general, types 2 and 3 are more useful for on-call SREs, as they reveal that something out of normal is occurring.\n\n### 2. Many alerts don't progress into an actual incident\n\nSREs deal with a constant stream of alerts. Many of these aren't super time-critical.\n\nWhy don't they limit the alerts to only things that are critical? This approach might cause early symptoms to be missed until they snowball into a higher impact issue.\n\nIt's the job of the on-call SRE to decide which alerts indicate something serious and when to escalate or investigate further. 
I suspect this may also be caused by inflexibility in alerts - it could be better to have more alert levels, or 'smarter' ways of setting up alerts as described above.\n\nFeature proposal: [https://gitlab.com/gitlab-org/gitlab/issues/42633](https://gitlab.com/gitlab-org/gitlab/issues/42633)\n\n### 3. Our on-call SREs use many tools\n\nInternal:\n- **GitLab infra project**: Runbooks live here. Handovers for the on-call shift/week. Incident Response issues.\n- **GitLab issues**: Investigation, post-mortems and maintenance work are also tracked with issues.\n- **GitLab labels**: Automation is triggered by certain labels using bots that monitor issue activity.\n\nExternal:\n- **PagerDuty**: Alerts\n- **Slack**: PagerDuty/AlertManager stream posted here. Integration with slash-commands to perform various tasks - e.g. resolve an alert or escalate an alert into an incident.\n- **Grafana**: Visualization of metrics with a focus on long-term trends.\n- **Kibana**: Provides visualization / log searching. Ability to drill into specific events.\n- **Zoom**: There is an always-running \"Situation Room\" Zoom call. This allows us SREs to quickly discuss events without wasting valuable time creating a meeting room and communicating the links out to everyone.\n\nAnd many many more.\n\n### 4. Monitoring GitLab.com with GitLab represents a single point of failure\n\nIf GitLab.com has a major outage, we don't want that affecting our ability to resolve the problem. This could be mitigated by running a second GitLab instance for operating GitLab.com. In fact, we already have this with [https://ops.gitlab.net/](https://ops.gitlab.net/).\n\n### 5. Some features we should consider adding to GitLab\n\n- [Multi-user edit on issues](https://gitlab.com/gitlab-org/gitlab/issues/103531), similar to Google Docs. This would aid both Incident issues in the middle of an event, and post-mortem issues. 
Both are situations when multiple people might want to add things to an issue in real-time.\n- More webhooks for issues. Being able to trigger different steps of the workflow from within GitLab will help reduce the reliance on Slack integrations. For instance: being able to resolve an alert in PagerDuty via a slash-command in a GitLab issue.\n\n## Conclusion\n\nSREs have a tough job with many complexities. It would be great to see more GitLab products in use solving these problems. We're already working on some additions to the product that will help with the above workflows. See the [Ops Section Product Vision](https://about.gitlab.com/direction/ops/) for more details.\n\nIn 2020 we'll be expanding the team so that we can build all of these exciting features. Please see [our vacancies](https://about.gitlab.com/jobs/) if you're interested, and feel free to get in touch with one of our team members if you have questions about the role.\n\nCover image by [Chris Liverani](https://unsplash.com/@chrisliverani?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,1339],{"slug":7511,"featured":6,"template":686},"sre-shadow","content:en-us:blog:sre-shadow.yml","Sre Shadow","en-us/blog/sre-shadow.yml","en-us/blog/sre-shadow",{"_path":7517,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7518,"content":7524,"config":7529,"_id":7531,"_type":14,"title":7532,"_source":16,"_file":7533,"_stem":7534,"_extension":19},"/en-us/blog/strategies-microservices-architecture",{"title":7519,"description":7520,"ogTitle":7519,"ogDescription":7520,"noIndex":6,"ogImage":7521,"ogUrl":7522,"ogSiteName":670,"ogType":671,"canonicalUrls":7522,"schema":7523},"Implementing microservices architectures and deployment strategies","Want to dump the monolith and get into microservices? 
Consider these three methods.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662898/Blog/Hero%20Images/microservices-explosion.jpg","https://about.gitlab.com/blog/strategies-microservices-architecture","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Implementing microservices architectures and deployment strategies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-06-17\",\n      }",{"title":7519,"description":7520,"authors":7525,"heroImage":7521,"date":7526,"body":7527,"category":679,"tags":7528},[788],"2019-06-17","\n\nMicroservices can have a major impact on organizations looking to increase automation and deployment speed. The biggest companies in the world – Amazon, Netflix, Google, etc. – all work on this architecture model and release at lightning speed. So why is using microservices so effective? The easiest way to understand [microservices architecture](/blog/what-are-the-benefits-of-a-microservices-architecture/) is by comparing it to its counterpart – the monolith.\n\nWith a monolithic architecture, all of the components are part of a single unit: Everything is developed, deployed, and scaled together. In comparison, [microservices](/topics/microservices/) have each component broken out and deployed individually as services, and these services communicate with each other via API calls. For complex applications that need to run at scale, microservices can offer greater flexibility, reliability, and a faster pace of innovation than monoliths.\n\nNo, monoliths aren’t inherently bad, but teams stuck in a monolith system often sacrifice speed for simplicity, and that could haunt them in the long term. So what do you do when you want to make the switch to microservices and start implementing faster? 
Consider these options.\n\n## The strangler method\n\n[Martin Fowler’s strangler method](https://www.martinfowler.com/bliki/StranglerApplication.html) was inspired by a trip he took to Australia:\n\n> “One of the natural wonders of this area [Australia] is the huge strangler vines. They seed in the upper branches of a fig tree and gradually work their way down the tree until they root in the soil. Over many years they grow into fantastic and beautiful shapes, meanwhile strangling and killing the tree that was their host.”\n\nIt sounds brutal based on this description, but it’s actually one of the gentlest and most effective transitions for an organization. Essentially, parts of the monolith become microservices little by little until eventually the monolith is cut out completely. The benefit is that this transition is much more gradual, so uptime and availability are largely unaffected while the organization modernizes. The con? Speed.\n\n## The Lego strategy\n\nLet’s say you don’t necessarily want to ditch the monolith completely. Maybe it has a valuable use for a certain product or facet of the organization, or maybe you just don’t have the resources to dismantle it or don’t want to. The Lego strategy could be the right choice.\n\nThe team at Kong use this term because you’re essentially building on top of what you already have (like Lego blocks). Instead of switching over to microservices completely, you commit to [building new features as microservices](https://konghq.com/blog/transition-to-microservices-what-now/) while still keeping the existing monolithic codebase. While this approach doesn’t fix current issues, it will help with future expansions and buy much-needed time. This hybrid environment can exist relatively pain-free but has some risks: Increased technical debt, navigating code versions between the monolith and the new microservices features, and maintenance costs.\n\n## The nuclear option\n\nImagine: Your monolith is kaput, finito, dunzo. 
It can’t be fixed and it can’t stay. What now? As the name suggests, going nuclear is the riskiest and rarest option of all. The upside is that you can start from scratch. The downside is... you start from scratch. This approach is risky because you do run the risk of downtime when everything shifts over to microservices – which is a real no-no for user experience. Infrastructure is best when it’s invisible, and a new microservices architecture won’t win back the favor of users that were inconvenienced. Then again, maybe your new microservices architecture was built perfectly and cloud, software, and staff are perfectly in place and users will never know the difference. That’s the risk of a full rip-and-replace.\n\n## A successful transition to microservices\n\n[The team at Verizon was able to reduce its data center deploys from 30 days to _under eight hours_ by utilizing microservices](/blog/verizon-customer-story/), and their application modernization strategy centered around four key goals:\n\n*   Architecture\n*   Automation\n*   Extensibility\n*   Being proactive\n\nBy having clear goals throughout the process, the Verizon team was able to remove manual deployments and streamline their processes. When adopting a microservices model, it helps to have some clear objectives about what you would like to achieve, and prioritize certain outcomes over others. Modernization projects almost never go according to plan, and if you have to make tough decisions, having a list of ‘must-haves’ can guide the conversation.\n\nThe oldest argument for monoliths has always been their simplicity: They’re easy to build and easy to run. While it was once difficult to develop applications with a microservices architecture, over the past five years it has become considerably easier with container orchestration tools like Kubernetes, [comprehensive CI/CD tools](/solutions/continuous-integration/) that automate testing and deployments, and APIs that update automatically. 
Developers can focus on innovating rather than completing manual tasks and maintaining legacy systems. Organizations that adopt microservices get their simplicity through automated processes, and while it’s not as simple as a monolith, the benefits far outweigh the cons.\n\nRegardless of which method you choose, the willingness to modernize to the latest [DevOps](/topics/devops/) architecture is the most important first step. Ready to dive into microservices?\n\n[Just commit](/blog/application-modernization-best-practices/).\n{: .alert .alert-gitlab-purple .text-center}\n",[9,109],{"slug":7530,"featured":6,"template":686},"strategies-microservices-architecture","content:en-us:blog:strategies-microservices-architecture.yml","Strategies Microservices Architecture","en-us/blog/strategies-microservices-architecture.yml","en-us/blog/strategies-microservices-architecture",{"_path":7536,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7537,"content":7542,"config":7548,"_id":7550,"_type":14,"title":7551,"_source":16,"_file":7552,"_stem":7553,"_extension":19},"/en-us/blog/summarize-issues",{"title":7538,"description":7539,"ogTitle":7538,"ogDescription":7539,"noIndex":6,"ogImage":2857,"ogUrl":7540,"ogSiteName":670,"ogType":671,"canonicalUrls":7540,"schema":7541},"ML experiment: Summarizing issue comments","Learn how GitLab is experimenting with ML-powered issue comment summarization in this fifth installment of our ongoing AI/ML in DevSecOps series.","https://about.gitlab.com/blog/summarize-issues","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ML experiment: Summarizing issue comments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Melissa Ushakov\"},{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2023-04-13\",\n      
}",{"title":7538,"description":7539,"authors":7543,"heroImage":2857,"date":7545,"body":7546,"category":1178,"tags":7547},[7544,2862],"Melissa Ushakov","2023-04-13","\n\n\u003Ci>This blog post is part of an ongoing series about GitLab's journey to [build and integrate AI/ML into our DevSecOps platform](/blog/ai-ml-in-devsecops-series/). The series starts here: [What the ML is up with DevSecOps and AI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature blogs from our product, engineering, and UX teams to showcase how we're infusing AI/ML into GitLab.\u003C/i>\n\n[GitLab issues](https://docs.gitlab.com/ee/user/project/issues/) are essential for team collaboration and serve as the source of truth for teams to align on the problem definition and scope of work for ongoing efforts. As teams collaborate on issues to refine them, the volume of comments grows. For issues with many comments, it can be challenging to understand the status of work at a glance. You may need to spend significant time reading comments to get an overview of the decisions made so far and to understand, for example, if there are any blockers. \n\nWith the recent advancements in AI and natural language processing, it's now possible for AI models to summarize text like that found in issue comments. We believe that this technology will help teams use their time more efficiently and help prevent losing track of important information within issue comments.\n\nLarge language models (LLMs) power generative AI solutions by using deep learning algorithms to analyze vast amounts of natural language data. These models are trained on massive datasets to develop an understanding of language patterns and context. 
Once trained, the models can generate new text that mimics human language.\n\nIn a rapid prototype, our own [Alexandru Croitor](https://gitlab.com/acroitor), Senior Backend Engineer, and [Nicolas Dunlar](https://gitlab.com/nicolasdular), Senior Frontend Engineer for our [Plan stage](/handbook/product/categories/#plan-stage), leverage generative AI LLMs to power comment summarization within [GitLab's issues](https://docs.gitlab.com/ee/user/project/issues/).\n\n![Prototype UX for comment summary](https://about.gitlab.com/images/blogimages/Issue_comment_summary_blog.gif){: .shadow}\n\nAbove, you can see an example of triggering the summarization of issue comments. Watch the full demo below. \n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/GMr3eHwbYAI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nOur experiment takes an individual's natural language comments, inferences them against a generative AI LLM, and through novel prompt engineering (the task of guiding LLM output through instructions), creates a summary of long comment threads. Part of our engineering exploration is examining how to chunk extremely long comment threads into parsable bits an LLM can succinctly and accurately summarize. \n\n## Iterating on AI/ML features\nWhile just an experiment today, we are iterating on how to effectively bring features like this to our customers. We're starting with summarization of issue comments, and are working to optimize prompts to provide more meaningful summaries. We are also investigating bringing this functionality to other objects like epics and merge requests.\n\nThis experiment is just the start of many ways we’re looking to infuse GitLab with AI/ML capabilities to help GitLab users become more efficient and effective at their jobs. 
We are [looking across the software development lifecycle](/blog/what-the-ml-ai/) for painful and time-consuming tasks that are ideal for AI Assisted features. We’ll continue to share these demos throughout this blog series.\n\nInterested in using these AI-generated features? [Join our waitlist](https://forms.gle/9eeUkPJauKsbLaoz5) and share your ideas.\n\nContinue reading our ongoing series, \"[AI/ML in DevSecOps](/blog/ai-ml-in-devsecops-series/)\".\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[9,1180,916,1181],{"slug":7549,"featured":6,"template":686},"summarize-issues","content:en-us:blog:summarize-issues.yml","Summarize Issues","en-us/blog/summarize-issues.yml","en-us/blog/summarize-issues",{"_path":7555,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7556,"content":7561,"config":7568,"_id":7570,"_type":14,"title":7571,"_source":16,"_file":7572,"_stem":7573,"_extension":19},"/en-us/blog/tackle-nists-plan-of-action-and-milestones-with-gitlabs-risk-management-features",{"title":7557,"description":7558,"ogTitle":7557,"ogDescription":7558,"noIndex":6,"ogImage":2016,"ogUrl":7559,"ogSiteName":670,"ogType":671,"canonicalUrls":7559,"schema":7560},"Managing risk with GitLab's plan of actions & milestones","The One DevOps Platform helps identify interdependencies and vulnerabilities as required by government compliance frameworks.","https://about.gitlab.com/blog/tackle-nists-plan-of-action-and-milestones-with-gitlabs-risk-management-features","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tackle a Plan of Actions and Milestones with GitLab’s risk management features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sameer Kamani\"}],\n        \"datePublished\": \"2022-07-07\",\n      }",{"title":7562,"description":7558,"authors":7563,"heroImage":2016,"date":7565,"body":7566,"category":875,"tags":7567},"Tackle a Plan of Actions and Milestones with GitLab’s risk management features",[7564],"Sameer Kamani","2022-07-07","\n\nSoftware is an essential part of everyday life. More and more organizations are being forced to push software to consumers faster for a better customer experience. But increasing software delivery speed cannot come at the expense of security. This adds more pressure on internal development, security, change management, operations, and site reliability teams.\n\nShifting left to find security vulnerabilities earlier within the DevOps process is a critical aspect of ensuring security scales with the pace of development. But U.S. federal government operations go a step further with the implementation of the National Institute of Standards and Technology (NIST) Risk Management Framework ([RMF](https://csrc.nist.gov/projects/risk-management/about-rmf)). The RMF, implemented with standards such as NIST 800-53, NIST 800-171, and NIST 800-37 all require careful consideration of security vulnerabilities identified as properly managed risks. This is further recommended with NIST 800-160 and NIST 800-161.\n\nHowever, practically speaking, not even the most diligent IT team can ensure full compliance with every requirement. 
This is when risk management becomes more critical as it has to be [continuously monitored](/stages-devops-lifecycle/monitor/) and evaluated through the software development lifecycle (SDLC).\n\nGenerally, the prescribed methodology is to prepare a plan and document the tasks necessary to resolve risks, along with the resources required to do so. Due to interdependencies with other software components, milestones may also be needed to track the work. This is embodied in the Plan of Actions and Milestones (POA&M) process.\n\n## GitLab and the POA&M process\n\nThere are two aspects of identifying and managing vulnerabilities. First, there has to be a quick and relatively easy way to identify new vulnerabilities and zero-day exploits as they become public. Second, it should be possible to check for existing vulnerabilities periodically – ideally in an automated or ad-hoc way as new information becomes available and internal or external auditor reviews are conducted.\n\nNIST provides a sample POA&M template to help organizations track the actions needed. But in our experience, the mental load to manage another separate document can be an added burden on all the teams, not to mention confusing as new versions of the information become available. GitLab provides numerous resources to assist with this process.\n\n## Using GitLab to identify vulnerabilities\n\nGitLab has multiple types of [security and compliance scanners](https://docs.gitlab.com/ee/user/application_security/) that evaluate source code in various ways. These scanners are capable of finding security weaknesses introduced in new code, vulnerable dependencies, container images, and non-compliant licenses from third-party code. These scans can run against every commit on every feature branch – before any code is merged or deployed into production. 
\n\n![GitLab scanning](https://about.gitlab.com/images/blogimages/poamprocess.png){: .shadow}\n\nAs potential security issues are found, GitLab provides an aggregated view of the findings both in the developer workflow and in dedicated vulnerability management tools. GitLab’s [Vulnerability Reports](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/) allow security teams the ability to triage and manage vulnerabilities for individual projects or across groups of projects. From here, security teams can evaluate vulnerabilities, track remediation progress, or dismiss any false positives.  \n\nThis provides a direct way to find, catalog, and manage vulnerabilities. As this process moves further along, and vulnerabilities are characterized as a risk, GitLab provides a one-click process to convert and link the vulnerability with a work management item known as an Issue in GitLab. This can become a central location where, as per the POA&M process, it can be assigned to the Directly Responsible Individual (DRI), with due dates and milestones.  The Issue can also be placed into an Epic to manage larger, dependent, and correlated pieces of work. Labels and Issue Boards make it easier to manage these work items while adding visibility to all parties involved. This provides further transparency into how the work progresses and where more attention is needed.\n\nActive systems management processes such as the one provided natively by GitLab to scan, identify, manage, and develop plans for mitigation all in one system can be game-changing as they can bring an organization closer to achieving continuous monitoring and mitigation.\n\nThe downstream effect of having a single system like GitLab is that all the metrics from when something is found to when it is completed are tracked in a single source of truth. 
This can create powerful insights for future improvement.\n\nDiscover more about how GitLab can support your POA&M process so you can deliver secure software faster. \n\n[Talk to an expert](/sales/) about GitLab and NIST risk management compliance.\n",[9,2534,875],{"slug":7569,"featured":6,"template":686},"tackle-nists-plan-of-action-and-milestones-with-gitlabs-risk-management-features","content:en-us:blog:tackle-nists-plan-of-action-and-milestones-with-gitlabs-risk-management-features.yml","Tackle Nists Plan Of Action And Milestones With Gitlabs Risk Management Features","en-us/blog/tackle-nists-plan-of-action-and-milestones-with-gitlabs-risk-management-features.yml","en-us/blog/tackle-nists-plan-of-action-and-milestones-with-gitlabs-risk-management-features",{"_path":7575,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7576,"content":7581,"config":7586,"_id":7588,"_type":14,"title":7589,"_source":16,"_file":7590,"_stem":7591,"_extension":19},"/en-us/blog/take-our-survey-on-collaborative-software-development",{"title":7577,"description":7578,"ogTitle":7577,"ogDescription":7578,"noIndex":6,"ogImage":6915,"ogUrl":7579,"ogSiteName":670,"ogType":671,"canonicalUrls":7579,"schema":7580},"Take our survey on collaborative software development!","If you have 4 minutes, we'd love to hear about how your organization collaboratively develops software.","https://about.gitlab.com/blog/take-our-survey-on-collaborative-software-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Take our survey on collaborative software development!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-10-12\",\n      }",{"title":7577,"description":7578,"authors":7582,"heroImage":6915,"date":7583,"body":7584,"category":769,"tags":7585},[745],"2022-10-12","\u003Cdiv data-tf-widget=\"hYFvwmA0\" data-tf-iframe-props=\"title=How collaborative is your 
software organization?\" data-tf-medium=\"snippet\" style=\"width:100%;height:400px;\">\u003C/div>\u003Cscript src=\"//embed.typeform.com/next/embed.js\">\u003C/script>\n",[9],{"slug":7587,"featured":6,"template":686},"take-our-survey-on-collaborative-software-development","content:en-us:blog:take-our-survey-on-collaborative-software-development.yml","Take Our Survey On Collaborative Software Development","en-us/blog/take-our-survey-on-collaborative-software-development.yml","en-us/blog/take-our-survey-on-collaborative-software-development",{"_path":7593,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7594,"content":7600,"config":7605,"_id":7607,"_type":14,"title":7608,"_source":16,"_file":7609,"_stem":7610,"_extension":19},"/en-us/blog/tasktop-gitlab-integration",{"title":7595,"description":7596,"ogTitle":7595,"ogDescription":7596,"noIndex":6,"ogImage":7597,"ogUrl":7598,"ogSiteName":670,"ogType":671,"canonicalUrls":7598,"schema":7599},"One step closer to DevOps success with GitLab + Tasktop","Good news for enterprise devs: flow GitLab Issues into your Agile tool for greater visibility and collaboration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671305/Blog/Hero%20Images/tasktop-integration-cover.png","https://about.gitlab.com/blog/tasktop-gitlab-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"One step closer to DevOps success with GitLab + Tasktop\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-09-15\",\n      }",{"title":7595,"description":7596,"authors":7601,"heroImage":7597,"date":7602,"body":7603,"category":299,"tags":7604},[1378],"2017-09-15","\n\nTasktop, the value stream integration tool for enterprise development teams, has launched their [GitLab Issues Connector](http://www.tasktop.com/integrations/gitlab-issues). 
Now you can automatically flow GitLab issues bi-directionally into tools such as JIRA, CA Agile Central (formerly Rally), HPE ALM, and VersionOne, facilitating effective DevOps at scale.\n\n\u003C!-- more -->\n\nGitLab Issues have endless applications: from proposing new features, to discussing implementation of new ideas, to obtaining support, an issue can serve a host of different functions.\n\nSo we're happy that Tasktop now makes it even easier to integrate issues into your workflow. It works with both the [Community and Enterprise editions](/stages-devops-lifecycle/) of GitLab to flow issues from GitLab into separate purpose-built tools, such as a development team’s Agile planning tools. An issue is then visible in both GitLab and your planning tool, and is always reflective of the current, up-to-date state. This means everyone is in the picture, whether the bulk of their work is done in GitLab or another tool.\n\nThis is great news for cross-functional teams and [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) (it's no secret, we're big fans).\n\n## We asked Tasktop, \"Why GitLab?\"\n\n\"There was (and is) strong customer demand for GitLab. We built this integration as it expands our ecosystem, especially in the Issue Tracker space. We see customers needing to connect relatively lightweight issue trackers with agile development tools, typically to allow defects to be sent to developers. GitLab Issues works with any of our connectors, but it makes the most sense when integrating with tools such as JIRA, CA Agile Central, or Microsoft TFS as customers seek to connect their software value stream from end-to-end.\" - Trevor Bruner, Product Manager, Tasktop\n\n## What we say\n\nGitLab VP of Product Job van der Voort:\n\n>\"GitLab allows anyone to reduce their time to value, bringing their ideas to production faster. With the Tasktop integration, yet another hurdle in going from one application to the next is removed. 
We're excited to see enterprises ship faster and more reliably.\"\n\nThe GitLab Issues Connector is now available in all editions of [Tasktop Integration Hub](https://www.tasktop.com/hub).\n\n*Is this integration the answer to your context-switching troubles? What will you use it for? Let us know in the comments!*\n",[231,9],{"slug":7606,"featured":6,"template":686},"tasktop-gitlab-integration","content:en-us:blog:tasktop-gitlab-integration.yml","Tasktop Gitlab Integration","en-us/blog/tasktop-gitlab-integration.yml","en-us/blog/tasktop-gitlab-integration",{"_path":7612,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7613,"content":7619,"config":7624,"_id":7626,"_type":14,"title":7627,"_source":16,"_file":7628,"_stem":7629,"_extension":19},"/en-us/blog/tech-debt",{"title":7614,"description":7615,"ogTitle":7614,"ogDescription":7615,"noIndex":6,"ogImage":7616,"ogUrl":7617,"ogSiteName":670,"ogType":671,"canonicalUrls":7617,"schema":7618},"How to use DevOps to pay off your technical debt","Technical debt is a universal problem with an equally universal solution – DevOps. Here's how DevOps can reduce the tech debt burden and help you deploy faster and more frequently.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681643/Blog/Hero%20Images/greenery.jpg","https://about.gitlab.com/blog/tech-debt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use DevOps to pay off your technical debt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-10-05\",\n      }",{"title":7614,"description":7615,"authors":7620,"heroImage":7616,"date":7621,"body":7622,"category":679,"tags":7623},[2002],"2020-10-05","\n\nOne of the primary resource constraints in the [DevOps](/topics/devops/) world is technical debt. 
Technical debt is a metaphor created by Ward Cunningham that compares the build-up of cruft (deficiencies in the internal quality of software systems) to the accumulation of financial debt, where the effort it takes to add new features is the interest paid on the debt, writes [Martin Fowler](https://martinfowler.com/bliki/TechnicalDebt.html).\n\nIt’s common for a busy developer to write code with known imperfections, but because the priority is to ship new features as quickly as possible, deliverables are often prioritized over correcting the inefficiencies in the process.\n\nOne of the major dilemmas with determining the value of spending precious time fixing cruft versus building new features is that the costs are not objectively measurable, says Fowler. Just like with paying off financial debt, the right call is largely circumstantial as opposed to absolute.\n\n\"Given this, usually the best route is to do what we usually do with financial debts, pay the principal off gradually,\" writes Fowler.\n\nBy cleaning up some of the cruft as you work on the new features, you ensure that the most relevant code is tidier for future iterations. When it comes to crufty, but stable, code, you can leave it alone. 
This method is similar to paying the monthly balance on a low interest rate loan – the impact is minimal.\n\n \"In contrast, areas of high activity need a zero-tolerance attitude to cruft, because the interest payments are cripplingly high,\" writes Fowler.\n\nOne way to start dealing with technical debt is to conduct a rough audit and triage your technical debt by \"interest rate\" – high interest rate cruft is addressed with the same priority as shipping new features, while medium-to-low interest rate cruft can be dealt with in a ratio that best suits your team’s situation, because dealing with your most urgent technical debt sooner rather than later will help you save resources in the long-term.\n\n## How tech debt accumulates in your workflow\n\nIt’s not just code that contains cruft. A lot of the time, we have cruft that slows down our engineering processes. When it comes to investing time and money into updating DevOps processes, it seems there is never enough of either resource.\n\n\"We don’t let our teams spend time on improving their process because we think it’s wasted effort,\" says [Brendan O’Leary](/company/team/#brendan), senior developer evangelist at GitLab. \"But if you can spend a day fixing some things that make your workflow inefficient, and you save an hour a week from now until eternity, that’s a big difference.\"\n\nTake for instance manual deployment versus the use of automated pipelines. We know that deploying manually takes an enormous amount of time, but the upfront cost of allocating time to building automated pipelines can seem daunting.\n\nIf your team is trapped in a time-consuming cycle of technical debt, take a peek at how Minnesota-based consulting firm, [BI Worldwide](/customers/bi-worldwide/) (BIW), was able to accelerate deployments by transitioning to GitLab. 
In the case study, the BIW Corporate Products Development Team explains how they were stuck in a rut of manual testing and manual deployments on their on-prem infrastructure. Their toolchains were complex and inefficient, which created a dense backlog.\n\n\"It was entirely time-consuming to apply all of those code changes,\" said Adam Dehnel, product architect, BIW, in the case study. As a result, deployments were infrequent and slow as too many features were crammed into each release.\n\nThe first step to increase the speed of their deployments was to update and modernize their processes.\n\n\"[BIW] had practices and tools in place at the time but were spending time on items that weren’t business differentiating features. They faced classic issues surrounding a lack of cross-team communication including inefficient mechanisms for intra-organization workflows and individualized toolsets.\"\n\nFirst, BIW made the painful transition from CVS to Git. Next, the company aimed to automate the build, test, and deployment process and built a toolchain with tools such as GitHub, Jenkins, JIRA, and Confluence.\n\nFor BIW, this complex toolchain was buggy. One thing that was not mentioned in this specific use case, but still merits recognition, is the hidden cost of maintaining all of these different tools.\n\n\"The argument to be made there is not only is it cost of using these various tools, but also that the more tools you have, there is the overhead cost of upgrading them, maintaining them, and integrating them,\" says Brendan. 
\"There’s a massive hidden cost behind the cost of doing business.\"\n\nIn the next iteration, BIW embraced the efficiency of an all-in-one tool by transitioning to GitLab.\n\nBIW went from a pre-Git pace of shipping a release every nine to 12 months to deploying nearly ten times a day using GitLab Ultimate, no doubt putting a serious dent in the technical debt that followed their slower, laborious release cycle.\n\n## Conserve valuable resources and pay off technical debt with DevOps\n\nIn a previous blog post, we examined [communication strategies to get non-technical stakeholders to buy-in to DevOps](/blog/devops-stakeholder-buyin/). DevOps can help you deploy faster and more frequently, giving your business an edge over the competition, but it is also a strategy for paying off your technical debt. By first taking into account inefficiencies in your code and engineering processes, you can make a rough triage of your team's technical debt. This type of audit is the first step to identifying cruft you can trim to help speed up your cycle time, clear your backlog, and modernize your engineering processes.\n\n## Read more\n\n- [Need DevOps buy-in? Here's how to convince stakeholders](/blog/devops-stakeholder-buyin/)\n- [A guide to cloud native storage for beginners](/blog/cloud-native-storage-beginners/)\n- [Want to iterate faster? 
Choose boring solutions](/blog/boring-solutions-faster-iteration/)\n\nCover Photo by [Vadim L](https://unsplash.com/@sk3tch?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/plants?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[728,855,9,109],{"slug":7625,"featured":6,"template":686},"tech-debt","content:en-us:blog:tech-debt.yml","Tech Debt","en-us/blog/tech-debt.yml","en-us/blog/tech-debt",{"_path":7631,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7632,"content":7638,"config":7643,"_id":7645,"_type":14,"title":7646,"_source":16,"_file":7647,"_stem":7648,"_extension":19},"/en-us/blog/ten-devops-terms",{"title":7633,"description":7634,"ogTitle":7633,"ogDescription":7634,"noIndex":6,"ogImage":7635,"ogUrl":7636,"ogSiteName":670,"ogType":671,"canonicalUrls":7636,"schema":7637},"DevOps terminology: 10 terms that might surprise you","From Yoda to yaks and even baklava, here are 10 DevOps terms we’re betting you’ve never heard of.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681526/Blog/Hero%20Images/devopsterms.jpg","https://about.gitlab.com/blog/ten-devops-terms","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps terminology: 10 terms that might surprise you\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-08-25\",\n      }",{"title":7633,"description":7634,"authors":7639,"heroImage":7635,"date":7640,"body":7641,"category":679,"tags":7642},[851],"2020-08-25","\n\nYou call yourself a [DevOps professional](/topics/devops/build-a-devops-team/) but do you know the definitions of yak shaving, Yoda conditions or baklava code?\n\nWe didn’t think so.\n\n## Benefits of DevOps\n\nDevOps outpaces the old software development methodologies like waterfall simply because it’s more efficient. 
Here are eight obvious DevOps wins:\n\n* Deployment is faster\n\n* Product quality is better\n\n* Automation simplifies the whole process\n\n* There’s flexible, continuous delivery\n\n* Scalability is even easier to achieve\n\n* Teams are transparent and communicative\n\n* There are faster fixes for bugs and other problems\n\n* It gives space to constantly iterate\n\nRegardless of your role on a business or a technical side, there are DevOps benefits for everyone.\n\n## DevOps terms and team communication\n\nA basic understanding of DevOps terms is important when it comes to optimal team communication. Otherwise, there are a lot of blank, blinking faces in the crowd. But even more important than simply understanding the terminology is consciously practicing good communication about DevOps and iterating on your team’s communication style.\n\nNew ideas, tools, and processes are constantly cropping up in the DevOps space, which means there is new terminology to learn. Great team communication involves continuously helping each other keep up with new knowledge and ensuring an environment of continuous learning.\n\n## DevOps terms glossary\n\nHere’s a look at our [DevOps](/topics/devops/) glossary with a focus on 10 DevOps terms even seasoned pros might not have encountered. And if you think there are some obscure ones we missed, please tell us about it [here](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/8878). We are working on a comprehensive GitLab guide to DevOps terms.\n\n### Devops term 1: Baklava code\n\n[Baklava](https://en.wikipedia.org/wiki/Baklava) is a dessert made up of many layers of thin phyllo dough – which is notoriously difficult to work with. Baklava code is the same: Lots of thin layers of code which makes it too fragile to stand up to real world use.\n\n### DevOps term 2: Dark launch\n\nA dark launch usually refers to a partial or incomplete release of a feature or features without any announcement. 
This under-the-radar release is a way to gather performance and testing data without the pressure of public input, because the features haven’t actually been talked about.\n\n### DevOps term 3: Dead code\n\nCode is considered \"dead\" if it lives in a program but actually doesn’t do anything and/or contribute to results or performance. Generally [dead code should be removed](https://refactoring.guru/smells/dead-code) as it’s a potential waste of space and computational power.\n\n### DevOps term 4: Everything-as-code\n\nEverything-as-code takes [infrastructure-as-code](https://searchitoperations.techtarget.com/definition/Infrastructure-as-Code-IAC) and goes one step further: Literally everything is treated as code including the infrastructure, virtual machines, and deployment configuration, to name a few. Everything-as-code is made possible by cloud native, proponents of it say it boosts traceability, repeatability, and testing. \n\n### DevOps term 5: Fear-driven development\n\nForget [FOMO](https://www.urbandictionary.com/define.php?term=Fomo), fear-driven development is what happens when project managers raise the stakes by moving up deadlines or laying people off. \n\n### DevOps term 6: NoOps\n\nIt’s DevOps without the \"Ops\" or what could happen if automation eliminates traditional ops tasks. Some see NoOps as the highest evolution of a successful DevOps practice while others don’t see it that way at all. NoOps joins a slew of other Ops-related terms including [GitOps](https://thenewstack.io/what-is-gitops-and-why-it-might-be-the-next-big-thing-for-devops/), [CIOps](https://dzone.com/articles/kubernetes-anti-patterns-lets-do-gitops-not-ciops), and more.\n\n### DevOps term 7: Rubberducking\n\nThis novel way of debugging code was made famous in the book [The Pragmatic Programmer](https://www.amazon.com/Pragmatic-Programmer-journey-mastery-Anniversary/dp/0135957052/ref=sr_1_1?dchild=1&keywords=the+pragmatic+programmer&qid=1598365813&sr=8-1). 
A programmer carries around a rubber duck and discovers that by explaining the code to the duck, line by line, the errors made themselves obvious. Translated for the real world, and practiced at GitLab, it means talking through your code with another developer which helps make flaws or logical errors more obvious.\n\n### DevOps term 8: Spaghetti code\n\nIf someone tells you your code is like spaghetti don’t take it as a compliment. Spaghetti code is all over the map, often with too many [GOTO statements](https://www.geeksforgeeks.org/goto-statement-in-c-cpp/). It’s poorly organized and often lacks any kind of traditional structure. \n\n### DevOps term 9: Yak shaving\n\nDuring a global pandemic when many are working from home, it’s safe to assume yak shaving is happening frequently, and it’s definitely a term that is used [outside of programming](https://americanexpress.io/yak-shaving/). In general, it means doing something that leads to something else but has nothing to do with the original goal. Programmers use it to refer to interminable tasks that must be done before a project can move forward, as in, \"I’ll get to that once I’ve shaved the yak.\"\n\n### DevOps term 10: Yoda conditions\n\n*Code you I will Luke Skywalker.* Yoda conditions refers to non-traditionally written code, i.e., code written as [Yoda](https://starwars.fandom.com/wiki/Yoda) speaks. Once you put yourself in the mindset it’s possible to understand what you’re looking at, but, just like Luke Skywalker experienced, it can take a while to get the hang of this.\n\n_Some of these are terms in use at GitLab, but in our research we stumbled across [the Coding Horror blog](https://blog.codinghorror.com/new-programming-jargon/) created by Jeff Atwood and we found a few new-to-us terms including Yoda conditions. 
Jeff refers to his list as the \"top 30 Stack Overflow new programming jargon entries.\"_\n\n## Growth of a DevOps culture\n\nA DevOps culture doesn’t grow simply because an organization decides to implement it. It takes daily, focused effort and cultivation. Some things organizations can do to foster the growth of a DevOps culture are to keep leadership in the loop, openly communicate across the team, and create a roadmap of shared goals and individual responsibilities to help achieve them. Understanding the lingo helps too!\n\nCover image by [Raphael Schaller](https://unsplash.com/@raphaelphotoch) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,728,683],{"slug":7644,"featured":6,"template":686},"ten-devops-terms","content:en-us:blog:ten-devops-terms.yml","Ten Devops Terms","en-us/blog/ten-devops-terms.yml","en-us/blog/ten-devops-terms",{"_path":7650,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7651,"content":7656,"config":7660,"_id":7662,"_type":14,"title":7663,"_source":16,"_file":7664,"_stem":7665,"_extension":19},"/en-us/blog/ten-reasons-why-your-business-needs-ci-cd",{"title":7652,"description":7653,"ogTitle":7652,"ogDescription":7653,"noIndex":6,"ogImage":990,"ogUrl":7654,"ogSiteName":670,"ogType":671,"canonicalUrls":7654,"schema":7655},"10 Reasons why your business needs CI/CD","Want to know why you should consider using CI/CD? 
Learn more here about the many business benefits of adopting a CI/CD workflow for you and your organization.","https://about.gitlab.com/blog/ten-reasons-why-your-business-needs-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"10 Reasons why your business needs CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-02-15\",\n      }",{"title":7652,"description":7653,"authors":7657,"heroImage":990,"date":2317,"body":7658,"category":679,"tags":7659},[851],"\nThere’s no escape: Your company is in the software business, even if it’s not. \n\nCompetitors, customers, investors, and employees are all demanding updated software on a regular basis, alongside whatever products your organization creates.\n\nSo embrace the reality (and [DevOps](/topics/devops/)) and invest in creating the most efficient continuous integration and delivery pipelines possible. Not sure how to sell this strategy to management? Start by pointing out it’s likely your closest competitor is already taking advantage of [continuous integration/continous delivery](/topics/ci-cd/)(CI/CD). And if you need more ammunition, here are 10 reasons why your business needs CI/CD.\n\n## What is CI/CD?\n\nCI/CD is a two-step process that dramatically streamlines code development and delivery using the power of automation. CI makes developer tasks like source code integration and version control more efficient so software can get into production faster. CD automates software testing and deployment. Together, CI/CD is a powerful and unmatched engine of modern software development and it has untold benefits for businesses.\n\n## What are the CI/CD benefits for business?\n\nCI/CD has numerous benefits for business. 
Here are 10 reasons to adopt CI/CD: \n\n* Ensure superior code quality\n\nIn our [2021 Global DevSecOps Survey](/developer-survey/), participants told us the number one reason to do DevOps is for code quality and, of course, the number one process teams need for DevOps is CI/CD. Because CI/CD pipelines offer test automation, developers can know about code problems nearly in real time. That concept of “failing fast” means teams aren’t wasting time or resources with buggy code, and devs aren’t plagued with endless “fix” requests when they’ve moved on to other projects. Time is saved, money is saved, and developers aren’t endlessly context switching… win, win, win.\n\n* Deliver faster with an accelerated release rate\n\nSkeptics about the benefits of CI/CD need only hear about global financial firm Goldman Sach’s success story: It’s Technology Division went from [one code build every two weeks to over 1,000 builds per day](/customers/goldman-sachs/). A unified CI/CD pipeline is like a turbo engine when it comes to boosting the rate of software releases. The faster code is released, the more new code can be developed, and then released, ad infinitum. The business bottom line: Expensive developer resources aren’t sitting idle when a successful CI/CD pipeline is in play.\n\n* CI/CD pipelines: Automation reduces the cost\n\nAnytime a human does not have to intervene in the software development process, time, and thus money, are saved. That’s why automation is the underpinning to successful DevOps practices. CI/CD automates the handoffs, the source code management, the version control system, the deployment mechanisms, and, of course, so much of the testing. \n\nOf all those, [testing](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) is arguably the most important. In our 2021 survey, testing was identified as the number one reason releases were delayed. 
Not only do delayed releases impact the business from a cost, branding, public relations, and even a reputation perspective, they are deadly to businesses relying on speedy time-to-market. Historically software testing was manual and incredibly time-consuming, which is why companies only released new code once or twice a year. In today’s world, companies have to release all the time, and automated software testing is critical to making that possible.\n\n* Fault isolation\n\nBefore DevOps and CI/CD gained traction in software development, development teams would know there was an issue with code, but would struggle to know exactly *where* the problem was happening. CI/CD and its automated testing has changed that. Developers can easily identify and then isolate code faults, dramatically improving productivity. \n\n* Simplified rollback\n\nA CI/CD pipeline gives developers the power to fail fast and recover even faster. It’s a simple process to push code into production and, if there are issues, simply roll it back. The ability to easily rollback code saves teams time, energy, and resources and leads to faster fixes of problem code. \n\n* Continuous feedback\n\nA unified CI/CD process, operating as part of a DevOps platform, gives everyone on the team – including business stakeholders – a way to see what’s happening, where it’s happening, and what might be going wrong. This sounds like a simple thing, but in reality, a single window into software development is almost revolutionary.\n\nIn the past, there were simply _so many tools_ in play that a project manager might have to look in a number of places, and ask a number of people, to get status updates. Developers and operations pros fared no better. Obviously that was a waste of time and resources, particularly when problems arose. \n\n* Optimum transparency and accountability\n\nThanks to continuous feedback, a CI/CD pipeline makes the entire software development process completely transparent to the business side. 
Product managers can check project status in a glance and track accountability as needed. \n\n* Improved mean time to resolution (MTTR)\n\nThanks to the visibility provided by a CI/CD pipeline, DevOps teams see issues quickly and can fix them fast. The ability to rapidly resolve problems lies at the heart of a key development metric: mean time to resolution, or MTTR. The better the MTTR, the more efficiently the DevOps team is working and the more quickly software can be released; in other words, MTTR has a direct effect on a business’s bottom line. \n\n* Monitoring metrics data\n\nTeams and the business side need to know how code is functioning in the real world, but in traditional software development practices monitoring metrics are often absent. In an ideal world, teams would know there was a code problem and roll it back long before end users realized it. A CI/CD pipeline makes that “ideal world” a reality by [delivering continuous feedback on a variety of metrics](https://about.gitlab.com/topics/ci-cd/continuous-integration-metrics/). Access to metrics data is more than just a time-saver, however, as no organization wants to be associated with bug-ridden code and applications that don’t perform well. \n\n* Reduction of non-critical defects in backlog\n\nBy now it’s clear CI/CD is a time and money saver, so much so that it gives developers time to work on things they wouldn’t normally be able to, such as going back to fix older code and make it cleaner and more efficient. 
The idea that devs cannot only tackle the backlog (it’s called a backlog for a reason after all – who has time for this?), but also work on non-critical defects, is a game-changer brought to teams by DevOps and CI/CD.\n",[9,109,749],{"slug":7661,"featured":6,"template":686},"ten-reasons-why-your-business-needs-ci-cd","content:en-us:blog:ten-reasons-why-your-business-needs-ci-cd.yml","Ten Reasons Why Your Business Needs Ci Cd","en-us/blog/ten-reasons-why-your-business-needs-ci-cd.yml","en-us/blog/ten-reasons-why-your-business-needs-ci-cd",{"_path":7667,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7668,"content":7673,"config":7679,"_id":7681,"_type":14,"title":7682,"_source":16,"_file":7683,"_stem":7684,"_extension":19},"/en-us/blog/test-automation-devops",{"title":7669,"description":7670,"ogTitle":7669,"ogDescription":7670,"noIndex":6,"ogImage":5149,"ogUrl":7671,"ogSiteName":670,"ogType":671,"canonicalUrls":7671,"schema":7672},"Trust, but verify: The importance of software test automation","Guest author Steve Ropa explains what a Cold War era motto has to do with test automation (seriously) and bringing development and operations closer together.","https://about.gitlab.com/blog/test-automation-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Trust, but verify: The importance of software test automation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steve Ropa\"}],\n        \"datePublished\": \"2018-04-23\",\n      }",{"title":7669,"description":7670,"authors":7674,"heroImage":5149,"date":7676,"body":7677,"category":679,"tags":7678},[7675],"Steve Ropa","2018-04-23","\nThis article is about software [test automation and DevOps](/topics/devops/), but bear with me a moment before we go there. Back during the Cold War, there were many discussions about how to improve relations and reduce the number of nuclear missiles countries were pointing at each other. 
During these talks, one of the biggest sticking points was how much the two countries could trust each other, and if they couldn’t, how they maintained their sovereignty while under some form of arms control agreement. They eventually agreed upon a regime of agreeing to perform certain actions, with periodic inspections on each side to confirm that said actions would happen, based on the Russian proverb, “trust, but verify.”\n\n## Trust but verify: DevOps requires trust\n\n“Trust, but verify” is commonly used with reference to security, but needs to be the motto for a good DevOps automation pipeline as well. First, for all the developers who ask, “Why should I care about DevOps?” I just want to mention that the key to success in *any* type of development is a short feedback loop and truly safe code. DevOps is a fantastic mechanism for helping us achieve that, by taking the valuable technical practices we’ve learned and applying them frequently and smoothly. DevOps as a cultural phenomenon relies heavily on Lean principles and on the concept of “trust-based management” where we put our trust in the teams to be professional craftsmen. This is important and can’t be overstated. We hire professional developers to do professional development, and we must trust them to get the job done.\n\n## Trust but verify: Today’s world requires verification\n\nAnd yet, there is a challenge. In most cases, software is not being created in a vacuum. We must make sure we are integrating well with the rest of the system. Sometimes, we have legal requirements like [Sarbanes-Oxley compliance](https://en.wikipedia.org/wiki/Sarbanes%E2%80%93Oxley_Act). In the past, we would schedule a large block of time to do all the integration and compliance testing. Now, we are saying, “Trust us, we will do the right thing” and deploying to production as quickly as is practicable. 
This is where automated testing comes into play.\n\n## Three categories of verification\n\nLet’s break this into three categories: Unit, System, and Compliance. This may not be the breakdown we usually think of, but bear with me. Unit testing is the most commonly discussed level of testing among modern developers. We write small, concise tests that exercise the code under development, ensuring it does what we intend it to do. Preferably we are doing test-driven development, and writing the tests prior to the code, but that is a blog for another time. Under the heading of “trust, but verify,” rather than slow down our development cycle with code review gates and, dare I say it, maybe even merge requests, we allow teams to check code in whenever they feel the need. Then we verify that we haven’t done anything untoward by running the automated unit tests on the build environment. This level of verification isn’t enough to ensure a truly high-quality system, but the lack of this level of verification is a definite step on the road to perdition.\n\n## System-level verification\n\nI like to use System testing to describe the various next level tests. This would be any system-wide tests, such as Acceptance, Integration, and possibly Performance testing. Unfortunately, many development teams stop at automated unit tests. They give lip service to automating other tests “when we have time.” And we know when that is. So, it gets skipped. Or is treated as a luxury. Since we often don’t have enough time to do all this testing manually either, we end up with defects and low-quality user experiences, which erodes trust in our teams’ ability to create and innovate. This downward spiral usually ends with organizations building giant processes and Change Advisory Boards to slow down the pace of change, all in the name of safety.\n\nSo instead, we trust our team to create innovative software, working closely with their customer or other representatives of said customer. 
To verify the teams are creating the right software, we represent the needs of the customer in terms of tests. These tests are automated to begin with, again preferably before creating the actual product. Then, we include the automated System tests into the DevOps pipeline, running with each check in. Now we can feel safe that the system is stable and represent the customers’ needs to the best of our ability to understand them.\n\n## Safety\n\nLastly, we need to verify that our teams are creating safe software. There are some excellent tools for automated security and safe programming scans. Include these as well into your pipeline. If they take too long, you can consider an alternate pipeline that runs less frequently, but start by running with each check in, until you feel that it just is getting too bogged down.\n\nIn the end, we are back to the basic statement, “trust but verify.” We won’t put massive processes and boards of review in place, slowing down the pace of development. We won’t create giant overarching architectures and just allow our developers to “fill in the blanks.” We will present them with the needs of the system and trust them to develop great software. Meanwhile, we will support them by verifying, many times a day, that they are still on track. 
Hey, if it worked for nuclear weapons, surely it can work for software.\n\n## About the guest author\n\nSteve Ropa is a Co-founder and Master Craftsman at the [Rocky Mountain Programmers Guild](https://www.rmprogrammers.com/) in Denver, Colorado, where he brings his long career of successful software delivery to elevate developers and teams to new levels of performance and Craftsmanship.\n\n[Photo](https://unsplash.com/photos/GNyy-D-SNN8?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Guillaume Lebelt on [Unsplash](https://unsplash.com/search/photos/patterns?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,1158],{"slug":7680,"featured":6,"template":686},"test-automation-devops","content:en-us:blog:test-automation-devops.yml","Test Automation Devops","en-us/blog/test-automation-devops.yml","en-us/blog/test-automation-devops",{"_path":7686,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7687,"content":7693,"config":7698,"_id":7700,"_type":14,"title":7701,"_source":16,"_file":7702,"_stem":7703,"_extension":19},"/en-us/blog/the-best-of-gitlabs-devops-platform-2021",{"title":7688,"description":7689,"ogTitle":7688,"ogDescription":7689,"noIndex":6,"ogImage":7690,"ogUrl":7691,"ogSiteName":670,"ogType":671,"canonicalUrls":7691,"schema":7692},"The best of GitLab's DevOps Platform 2021","Some highlights from last year, and what to expect from 2022.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667509/Blog/Hero%20Images/continuous-integration-from-jenkins-to-gitlab-using-docker.jpg","https://about.gitlab.com/blog/the-best-of-gitlabs-devops-platform-2021","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The best of GitLab's DevOps Platform 2021\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-02-18\",\n      
}",{"title":7688,"description":7689,"authors":7694,"heroImage":7690,"date":7695,"body":7696,"category":769,"tags":7697},[766],"2022-02-18","\nBefore we get too far into 2022, we wanted to take a look back at the most exciting additions to our [DevOps Platform](/topics/devops-platform/) over the last year. Since we release every month on the 22nd, there were lots of new features to consider, but these stood out to me.\n\n## Epic Boards\n\nIn [GitLab 14.0](/releases/2021/06/22/gitlab-14-0-released/#epic-boards), we made it easy to keep track of all epics in one place through Epic Boards. Our Epic Boards are customizable with a simple “drag and drop” interface accessible to all teammates, not just the technical ones. Now it’s painless to create general or DevOps-focused workflow states. And teams aren’t just more efficient, they can actually be predictable.\n\nExplore our [Epic Boards](https://docs.gitlab.com/ee/user/group/epics/epic_boards.html).\n\n## Integrations with VS Code and Gitpod\n\nFans of Visual Studio Code got a much tighter integration with GitLab in 2021. The [GitLab Workflow Extension](https://docs.gitlab.com/ee/user/project/repository/vscode.html) reduces context switching and improves productivity. And we rounded up [8 ways to get the most out of VS Code and GitLab](/blog/vscode-workflows-for-working-with-gitlab/).\n\nGitLab also created a tighter integration with Gitpod. Developers can now set up environments as code, greatly [speeding up the process](/blog/teams-gitpod-integration-gitlab-speed-up-development/). I think this Gitpod integration is so slick I used it to [code, build and deploy from an iPad](/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod/). Gitpod and its features give developers an opportunity to think outside the box.\n\n## So much security\n\nIn 2021 we gave security pros a true “home” in GitLab with our security dashboard. 
Teams can now [see vulnerabilities in a pipeline](https://docs.gitlab.com/ee/user/application_security/security_dashboard/) and easily slice and dice that data as necessary.\n\nStatic application security testing (SAST) also got an upgrade last year. We now have nextgen SAST that will [reduce Ruby false positives](/releases/2021/09/22/gitlab-14-3-released/#next-generation-sast-to-reduce-ruby-false-positives) as well as the ability to automatically test [Infrastructure as Code](/releases/2021/11/22/gitlab-14-5-released/) for the first time.\n\n## Praefect for Gitaly\n\nCustomers who want high availability on their own instances now can use Praefect, [our Gitaly clustering solution](/blog/high-availability-git-storage-with-praefect/), that allows Git to scale. Here’s what you [need to know](https://docs.gitlab.com/ee/administration/gitaly/praefect.html) about configuring a Gitaly cluster.\n\n## A visual pipeline editor\n\nIt’s hard to build it if you can’t see it, and that’s where our Pipeline Editor comes in. Use Pipeline Editor to [quickly set up CI/CD](/blog/pipeline-editor-overview/) because it’s now easy to see configurations and dependencies between jobs. Validate and visualize [all parts of the pipeline](https://docs.gitlab.com/ee/ci/pipeline_editor/) without feeling overwhelmed by the complexity.\n\n## Working with (and on) OpenShift\n\nIt’s now possible to set up a GitLab Runner for [Red Hat’s popular OpenShift infrastructure](https://docs.gitlab.com/runner/install/operator.html). Organizations relying on OpenShift can now use [the GitLab Operator](https://about.gitlab.com/blog/open-shift-ga/) to easily tap into the power of GitLab’s DevOps Platform.\n\n## The GitLab Agent for Kubernetes\n\nLast fall we announced an easier way to tackle GitLab and Kubernetes integrations in a secure and cloud-friendly way: [The GitLab Agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/). 
We call this `agentk` and here’s [everything you need to know](/blog/setting-up-the-k-agent/) about set up.\n\n## 2021 and 2022\n\nIf I had to sum it up, I’d say that in 2021 we doubled down on security. And this year, expect us to double down on operations, specifically observability, thanks to our [acquisition of Opstrace](/press/releases/2021-12-14-gitlab-acquires-opstrace-to-expand-its-devops-platform-with-open-source-observability-solution.html). It’s going to be an exciting ride!\n",[9,875,749],{"slug":7699,"featured":6,"template":686},"the-best-of-gitlabs-devops-platform-2021","content:en-us:blog:the-best-of-gitlabs-devops-platform-2021.yml","The Best Of Gitlabs Devops Platform 2021","en-us/blog/the-best-of-gitlabs-devops-platform-2021.yml","en-us/blog/the-best-of-gitlabs-devops-platform-2021",{"_path":7705,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7706,"content":7711,"config":7716,"_id":7718,"_type":14,"title":7719,"_source":16,"_file":7720,"_stem":7721,"_extension":19},"/en-us/blog/the-changing-roles-in-devsecops",{"title":7707,"description":7708,"ogTitle":7707,"ogDescription":7708,"noIndex":6,"ogImage":4958,"ogUrl":7709,"ogSiteName":670,"ogType":671,"canonicalUrls":7709,"schema":7710},"Why - and how - DevOps roles are changing","Our 2022 Global DevSecOps Survey finds developers in ops and security while operations is everywhere.","https://about.gitlab.com/blog/the-changing-roles-in-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why - and how - DevOps roles are changing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-08-31\",\n      }",{"title":7707,"description":7708,"authors":7712,"heroImage":4958,"date":7713,"body":7714,"category":679,"tags":7715},[851],"2022-08-31","\nFor three years, developers, security team members, and operations professionals have suggested to us in our 
annual surveys that their responsibilities were shifting. But this year that “shift” became a tidal wave of change - and that change is towards [DevSecOps](/topics/devsecops/).\n\nIn our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/), more than 5,000 practitioners shared details of DevOps roles in a state of flux: devs taking on ops and security tasks, security working hand-in-hand with dev teams, and ops wearing an improbable number of hats. \n\nThese are big changes, but surprisingly not chaotic ones. In fact, at a time of great technical and macroeconomic upheaval, the evolution of DevOps jobs and responsibilities seems to be designed to bring teams more tightly together. DevOps is more than 14 years old at this point – an argument could be made that [true collaboration](/blog/collaboration-communication-best-practices/) is finally underway. \n\nWhatever is at play, it’s clear substantive changes are happening. Here’s what our respondents told us about their new responsibilities.\n\n## DIY and developers\n\nToday’s developers are literally DIYing all the (ops) things. This year, 38% reported instrumenting code they’ve written for production monitoring, up 12% from 2021 and more than double the percentage in 2020. The same percentage of devs monitor and respond to the infrastructure, up 13% from last year, and 36% are “on call” for app-in-production alerts. Some devs are now authoring runbooks for apps in production and even serving as a point-of-contact if something is escalated.\n\nThey’re also spending a lot more time on toolchains. 
Nearly 40% spend between one-quarter and one-half of their time [maintaining or integrating toolchains](/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer/) (more than double last year’s percentage), while a full third of devs spend between half and **all** of their time on this task.\n\nAnd, in any time left over, devs are digging into security, so much so that 53% said they are fully responsible for security in their organizations.\n\nAt the same time, devs are also opting out of tasks that have long been in their wheelhouse, including testing, manual testing, code review, documenting code changes, and planning.\n\n## Security is a team sport\n\nNo longer a siloed group, security team members are literally rolling up their sleeves and joining in. Almost 29% of those surveyed said they’re now part of a cross-functional team and 35% are more involved with teams and “hands on” with DevOps projects, an 11-point jump over 2021.\n\nFor the first time ever, 7% of security team respondents said they have more influence on engineering decisions; a small percentage, but it’s a start for a group traditionally viewed as not part of the “team.”\n\n## Nimble ops pros\n\nWhile devs are busy with what have been traditional ops roles, ops pros are off-roading with responsibilities not really seen in DevOps teams before. For the past few years, ops has reported splitting time between managing infrastructure and managing the cloud, and that didn’t change dramatically in 2022. But when they’re not juggling the cloud and infrastructure, ops is taking on a number of new challenges, including DevOps coaching, responsibility for automation, overseeing [all compliance efforts](/blog/the-importance-of-compliance-in-devops/), and [platform engineering](/topics/devops/what-is-a-devops-platform-engineer/). 
\n\nAnd, as if that isn’t enough, 48% of ops pros said they feel fully responsible for security in their organizations.\n\nWant to know more about how DevOps roles are changing? Read our [2022 Global DevSecOps Survey](/developer-survey/).\n",[681,9,813,875],{"slug":7717,"featured":6,"template":686},"the-changing-roles-in-devsecops","content:en-us:blog:the-changing-roles-in-devsecops.yml","The Changing Roles In Devsecops","en-us/blog/the-changing-roles-in-devsecops.yml","en-us/blog/the-changing-roles-in-devsecops",{"_path":7723,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7724,"content":7729,"config":7734,"_id":7736,"_type":14,"title":7737,"_source":16,"_file":7738,"_stem":7739,"_extension":19},"/en-us/blog/the-code-review-struggle-is-real-heres-what-you-need-to-know",{"title":7725,"description":7726,"ogTitle":7725,"ogDescription":7726,"noIndex":6,"ogImage":1801,"ogUrl":7727,"ogSiteName":670,"ogType":671,"canonicalUrls":7727,"schema":7728},"The code review struggle is real. Here's what you need to know","If it's time for a DevOps Platform, don't forget the role code review plays. Our 2021 Global DevSecOps Survey showed why it's both critical and tricky to get right.","https://about.gitlab.com/blog/the-code-review-struggle-is-real-heres-what-you-need-to-know","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The code review struggle is real. Here's what you need to know\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-09-03\",\n      }",{"title":7725,"description":7726,"authors":7730,"heroImage":1801,"date":7731,"body":7732,"category":769,"tags":7733},[851],"2021-09-03","\n_Our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) is out now! 
Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nWhen making a list of the reasons to consider moving to a DevOps Platform, don't forget about code review, a critical piece of the process that's also an incredible source of frustration and delays to developers and their teams.\n\nIn our [2021 Global DevSecOps Survey](/developer-survey/), respondents told us code quality was the number one reason they chose DevOps. But, when asked what was most likely to delay a product release, code review – vital to code quality – was one of the top four culprits (the others were testing, planning and code development). \n\nThe fact that code review is a pain point is hardly surprising, given that it can often require context-switching, communication, collaboration, and of course subject matter expertise. At a time when it's never been more urgent to release secure code as quickly as possible, it's not a stretch to think code reviews can feel like a hard stop to some teams, particularly if the process is not integrated into an existing workflow.\n\n**[Here's everything you need to know about a [DevOps Platform](/topics/devops-platform/)]**\n\n## Why code review is painful\n\nIn fact, when we asked our survey respondents to tell us in their own words what they struggle with when it comes to code review, they had \\*a lot\\* to say on the subject.\n\n*\"Code reviews can take a long time due to the lack of reviewers.\"*\n\n*\"Many people find it a chore to review code.\"*\n\n*\"We have a strict code review process and it often takes several days for the reviewer to respond to requests for review.\"*\n\n*\"Code review takes time and every developer has to explain how they achieved what they did.\"*\n\n*\"Developers are sometimes unaware they have to do code reviews. They aren't sure how to perform them and if they are effective. Sometimes they are skipped so the process can go through.\"*\n\n*\"Finding someone for code review can be hard (1-day average). 
After that, business tests take time to be completed (2-4 days on average).\"*\n\n[Code review is tricky](/blog/challenges-of-code-reviews/), but almost 60% of those surveyed said the reviews were \"very valuable\" in ensuring code quality and security. And it's not like teams aren't actually tackling code review: In 2021 close to 45% of respondents said they review code weekly, and 22% do it every other week – a 14% jump from 2020.\n\n**[Your organization needs a DevOps Platform team. [Here's why](/topics/devops/how-and-why-to-create-devops-platform-team/)]**\n\nBut anecdotal data tells a slightly different story, from developers saying their teams do no code review at all, to code reviews so comprehensive they include every merge request, ticket, or pull. Many developers said they review code daily, or even multiple times a day. Survey takers said code reviews were most likely conducted using online chat, with developers showing a strong preference for reviewing code in an IDE rather than a browser.\n\n## Better code reviews\n\nAt GitLab we pride ourselves in dogfooding our DevOps Platform, so of course we spend a lot of time thinking about how to [improve our code review process](/blog/better-code-reviews/). We've had a lot of success [using smaller merge requests](/blog/iteration-and-code-review/), as just one example.\n\nOur survey takers told us they were on the same continuous improvement journey – many spent the past year [evaluating how to make their code reviews and other DevOps stages more efficient](/blog/efficient-code-review-tips/). One respondent offered a detailed look:\n\n*\"We evaluated the team and did value stream mapping and finalized the desired state. In most of the cases we found the team needs an automated pipeline for faster delivery and immediate feedback so that they can act fast rather than later. We also moved security left so that developers can fix security issues fast. 
And we also made sure developers are doing code review in a collaborative way through pull requests.\"*\n\nAnother team focused exclusively on reducing its dependence on code review:\n\n*\"(We are no longer) relying on code review to have caught all the test scenarios. We now use a coverage scanning tool to tell us if we've got it all.\"*\n\n## More code reviews > less code reviews\n\nThe struggle is real, but so is the importance. Despite a lot of complaining about code review, developers remained adamant about its importance in DevOps. When we asked devs what they wish they could do more of, code review was at the top of the list, with more than 1000 survey takers indicating they wish they could do way more code reviews than they're doing at present.\n\nIn our next blog post, we'll outline five ways GitLab's DevOps Platform has made code reviews easier.\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n",[771,9,681],{"slug":7735,"featured":6,"template":686},"the-code-review-struggle-is-real-heres-what-you-need-to-know","content:en-us:blog:the-code-review-struggle-is-real-heres-what-you-need-to-know.yml","The Code Review Struggle Is Real Heres What You Need To Know","en-us/blog/the-code-review-struggle-is-real-heres-what-you-need-to-know.yml","en-us/blog/the-code-review-struggle-is-real-heres-what-you-need-to-know",{"_path":7741,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7742,"content":7748,"config":7753,"_id":7755,"_type":14,"title":7756,"_source":16,"_file":7757,"_stem":7758,"_extension":19},"/en-us/blog/the-devops-platform-for-agile-business",{"title":7743,"description":7744,"ogTitle":7743,"ogDescription":7744,"noIndex":6,"ogImage":7745,"ogUrl":7746,"ogSiteName":670,"ogType":671,"canonicalUrls":7746,"schema":7747},"The DevOps Platform for agile business","For a truly agile business, in every sense of the word, we think you need GitLab's DevOps Platform. 
Here's why.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668155/Blog/Hero%20Images/devops-strategy-devops-toolchain.png","https://about.gitlab.com/blog/the-devops-platform-for-agile-business","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The DevOps Platform for agile business\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cormac Foster\"}],\n        \"datePublished\": \"2021-11-03\",\n      }",{"title":7743,"description":7744,"authors":7749,"heroImage":7745,"date":7750,"body":7751,"category":769,"tags":7752},[1134],"2021-11-03","\n\nIf you’ve spent time on the site, you already know that GitLab is The DevOps Platform, a single-application solution that is radically different than DIY DevOps toolchains. Have questions? Great — because we have answers, and [we’d love to chat](/demo/). But let’s address the most important question of all right here: “What’s in it for me?” Our approach is objectively different from other solution providers, but why should you care?\n\nIt’s a valid question, and one you should feel comfortable asking any solution provider. You don’t trade your station wagon for a Ferrari for looks, you get the Ferrari because it wins races. In our case, [The DevOps Platform](/solutions/devops-platform/) will absolutely make you better at “doing DevOps.” You can simplify your infrastructure like [Glympse](/customers/glympse/), which consolidated 20 tools into one. You can speed your delivery like [Goldman Sachs](/customers/goldman-sachs/), which increased deployments from two per month to 1,000 per day. You can run security scans concurrently with development like [Chorus](/customers/chorus/). These are all substantial improvements that can [generate real ROI](/resources/report-forrester-tei/) right away while improving quality of life for your employees. 
On its own, that’s fantastic — but incremental gains are just the start of the journey. \n\nUltimately, the point of The DevOps Platform isn’t doing DevOps better; it’s transforming your business and improving agility.\n\n_That’s a pretty bold statement._\n\nQuite, but it’s also true. We don’t want to improve DevOps outputs. We want to help you realize business outcomes. \n\nDevOps broke down silos between Development and Operations, ramping efficiency and velocity, but did that ultimately make businesses more profitable or innovate? Frequently, no, because the blocker to value creation just moved somewhere else — like Security, Product or Compliance. You can deploy 100x per day, but if a weeks-long audit process stands between any build and your customer, those efficiency gains might not matter. That’s where The DevOps Platform shines. As a single source of truth with a single data store, it provides visibility and context to every stakeholder in the company, whether they’re a Developer, Designer, Auditor, Security Professional or anyone else with a part to play.\n\nGot it. It’s about collaboration.\n\nAbsolutely. Our [mission statement](/company/mission/#mission) is “Everyone can contribute” because collaboration equals innovation. But you can’t just throw a wiki or a ticketing system at a problem and call it fixed. Contextual collaboration matters. If I’m a product owner and development is blocked or a security pro who sees that a known vulnerability wasn’t remediated, I need to know why, when and by whom. If you aren’t collaborating inside the system of work with real-time data, you’re probably missing critical context, which leads to poor decisions and, ultimately, less value in the hands of customers. When every company is a software company, every member of the business needs to be involved with and understand some aspect of software delivery.\n\n_So, now we’re talking about DevSecProductDesignFinanceAuditOps?_\n\nSure. But that’s a mouthful. 
Let’s just call it “agile business” because that’s really what The DevOps Platform gives you in the end. We’re extending the benefits of DevOps to a broader range of roles to remove those blockers at the edges of the process. This allows your whole organization to engage in and reap the rewards of agile thinking. And that brings us back to transformation. A platform is what’s been missing from so many Digital Transformation initiatives over the years, and a big reason why, even though businesses have gained efficiencies here and there, the primary goal of doing big things differently has been so hard to reach.\n\nNo software solution — not even GitlLab — can do the work for you, but that’s OK. You already know how to run your business. You have ideas and talent. You just need the software behind your software to get out of your way and support your business as you innovate. If you’ve already invested in transformation initiatives, The DevOps Platform can unlock the value of those investments. If you’re just getting started, it can help you scale your innovation immediately. 
To see how The DevOps Platform can work for you, [try GitLab Ultimate for free](/free-trial/).\n",[9,855,793],{"slug":7754,"featured":6,"template":686},"the-devops-platform-for-agile-business","content:en-us:blog:the-devops-platform-for-agile-business.yml","The Devops Platform For Agile Business","en-us/blog/the-devops-platform-for-agile-business.yml","en-us/blog/the-devops-platform-for-agile-business",{"_path":7760,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7761,"content":7766,"config":7771,"_id":7773,"_type":14,"title":7774,"_source":16,"_file":7775,"_stem":7776,"_extension":19},"/en-us/blog/the-devops-platform-series-building-a-business-case",{"title":7762,"description":7763,"ogTitle":7762,"ogDescription":7763,"noIndex":6,"ogImage":7389,"ogUrl":7764,"ogSiteName":670,"ogType":671,"canonicalUrls":7764,"schema":7765},"The DevOps Platform series: Building a business case","Understanding the need for a DevOps platform is key to realizing the business value of DevSecOps. This is the first in a three-part series.","https://about.gitlab.com/blog/the-devops-platform-series-building-a-business-case","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The DevOps Platform series: Building a business case\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Faus\"}],\n        \"datePublished\": \"2022-02-03\",\n      }",{"title":7762,"description":7763,"authors":7767,"heroImage":7389,"date":5116,"body":7769,"category":769,"tags":7770},[7768],"Lee Faus","\n\n\n_This is the first in a three-part series._\n\nOver the past five years, I’ve spent a lot of time with executives having them question me about how other companies and their competitors are navigating [DevSecOps](https://about.gitlab.com/blog/gitlab-is-setting-standard-for-devsecops/). 
This series shares how to introduce a DevOps platform into your organization to support DevSecOps.\n\n## Realizing the need for DevOps\n\nWhen I was at GitHub, I had a partner at Accenture who provided me with a great definition of DevOps that I still use today: “It is the combination of agility, collaboration, and automation that drives DevOps.” This struck a chord with me because at the time execs were “promoting” DevOps with Jira, GitHub, and Jenkins. They just needed to form a DevOps team to manage these products and provide the integration between them. Then they could mark DevOps off their KPIs for the next year and move on to the next set of challenges.\n\nUnfortunately, this only created more challenges. Tying these products together required a significant amount of work and the people in charge of this integration were usually operations folks or consultants who were more familiar with business continuity plans, standard operating procedures, high availability, and disaster recovery than writing custom code to provide a better experience for users.\n\nThey needed functionality that would capture metrics for team leads, managers, and executives so they could understand how the platform being built was driving customer adoption, increasing revenue or reducing costs. But capturing metrics meant engineering work that led to internal dogfooding. Also, people new to the field of platform engineering were not happy with the solutions they were presented with.\n\nThe entire process led to new evaluations of different products and additional stakeholders. We started managing infrastructure as code, testing tools, security, and deployment tools as part of this solution. 
We were no longer just building integrations, we were building a developer platform or what most of the executives I talked to called a “Developer Self-Service Platform.”\n\n## The cost of disparate DevOps tools \n\nThe executives I talk to now know this story as they were probably in charge of building this platform five years ago and have since been promoted to own more than just the tooling: Today they’re in charge of site reliability, cloud adoption, or platform engineering. Their teams have anywhere from 10 to 50 different tools, each with their own unique use case. The challenge now is to leverage these tools in a way that they were never intended to be used.  \n\nExecutives need data and analytics to optimize their business. This means collecting the data from all of the tools in a meaningful way where they can build analytical models for them to budget, plan, and report on the state of the business. I know of five Fortune 100 companies that have been on this path for more than three years and are still waiting to provide the first dashboard to their executive teams.\n\nThese companies have easily invested “eight figure” budgets to account for the people, process, culture, and tooling changes required to try and make this work. The total cost of ownership could total more than $10 million when you look at the department's profit and loss statements. The return on investment would take over five years *if* the team is able to generate or reduce costs by an additional $5 million a year in new revenue with a fully integrated platform that is live today. \n\nUnfortunately, people realize quickly that just extracting the data from the tools is not enough. There is context in the conversations that drives real-time decisions and those nuances often don’t make it into the analytical stores. 
The end result can be knee-jerk business decisions that can be detrimental to the business for years to come.\n\n## Understanding DevOps platform requirements\n\nLet’s begin with a simple question: Do you have requirements for this platform? You are about to embark on building an internal platform for your company. You want to leverage your intellectual capital and experience to drive innovation and create efficiencies that allow you to more effectively run your business for years to come.\n\nIf you don’t have requirements, you should start with a [value stream assessment](/handbook/customer-success/solutions-architects/sa-practices/value-stream-assessments/) to create them. This value stream assessment looks at your lines of business, the processes used to create artifacts, and the time it takes to get those artifacts to a place where they can be used to generate revenue, retain existing customers, or generate business efficiencies.\n\nOnce you have these metrics, measure the time from initial sourcing of the need to the number of people required to touch the artifact and ensure the quality and security of it as it moves across different environments. Divide that result by the time it takes to go from initial change to a production environment.\n\nThe more touchpoints in this process, the more costs will increase and so will the risk of software supply chain issues. Think of this like an assembly line: The hand-offs between touchpoints will require you to implement quality frameworks like [supply chain levels for software artifacts](https://slsa.dev/) to ensure chain of custody for audit and compliance needs. These challenges just keep getting bigger as you try to add additional tools to this platform.\n\nIs it possible to meet all of your requirements with a single product? Try GitLab to see what requirements we can help you meet. The only way to start capturing ROI is to stop building *your* platform today. 
If you don’t stop building it, you are still capturing costs and every tool you add chips away at the economies of scale. You can think of every new product as one input and one output. This means to go from four tools to five tools you are adding an additional 100 connections to be integrated together. It takes engineering time to properly integrate a product into the overall software supply chain instead of consuming the capabilities as a platform-enabled service that you can use out of the box to meet your requirements. \n\nIf you are interested in learning more about doing a value stream assessment – or if you have done one and are looking for ways to build efficiencies inside your organization – please let me know and we can work together to help make the best choice for your organization, even if it means continuing with a platform you already started building.\n\n_In the next part of this series, we will look at how different vendors define the term “platform” and their motivation behind helping you achieve your requirements._\n",[9],{"slug":7772,"featured":6,"template":686},"the-devops-platform-series-building-a-business-case","content:en-us:blog:the-devops-platform-series-building-a-business-case.yml","The Devops Platform Series Building A Business Case","en-us/blog/the-devops-platform-series-building-a-business-case.yml","en-us/blog/the-devops-platform-series-building-a-business-case",{"_path":7778,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7779,"content":7784,"config":7789,"_id":7791,"_type":14,"title":7792,"_source":16,"_file":7793,"_stem":7794,"_extension":19},"/en-us/blog/the-gitlab-guide-to-modern-software-testing",{"title":7780,"description":7781,"ogTitle":7780,"ogDescription":7781,"noIndex":6,"ogImage":5149,"ogUrl":7782,"ogSiteName":670,"ogType":671,"canonicalUrls":7782,"schema":7783},"The GitLab guide to modern software testing","If test is your DevOps team's Public Enemy No. 1, it's time to rethink your strategy. 
Here's what you need to know about modern software testing.","https://about.gitlab.com/blog/the-gitlab-guide-to-modern-software-testing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The GitLab guide to modern software testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-08-18\",\n      }",{"title":7780,"description":7781,"authors":7785,"heroImage":5149,"date":7786,"body":7787,"category":769,"tags":7788},[851],"2022-08-18","\nWhat's the trickiest part of DevOps? It's software testing, hands down. Year after year, respondents to our [annual DevSecOps surveys](/developer-survey/) have called out testing as the most likely reason for release delays. And that's not all they said: \"Testing takes too long,\" \"There are too many tests,\" \"We need to do more testing,\" “We need more automated testing but don't have time,\" \"Testing happens too late,\" etc.\n\nClearly something this fraught needs all the help, so here is our best advice to get testing \"just right\" in any modern DevOps practice. \n\n## Use the right metrics\n\nAll of the testing in the world doesn't matter if a DevOps team is measuring the wrong things. At GitLab, we use industry-standard metrics, but we look at them a bit differently. When it comes to S1 and S2 bugs we don’t count the time to close but rather the age of the bugs that remain open. Our reasoning? We want to look forward, but we also don't want to [incentivize closing only newer bugs](/blog/gitlab-top-devops-tooling-metrics-and-targets/). So it's important to make sure DevOps teams are looking at the right metrics and with shared goals in mind.\n\n## Forget flaky\n\nTests are noisy, and they can be flaky, setting off alarms and disrupting developer flow, often for no reason. 
That's at the heart of developer frustration with testing, and one of the biggest problems DevOps teams need to solve. GitLab's Vice President of Quality [Mek Stittri](/company/team/#meks) suggests re-thinking how automated tests are created. Tests need to be validating the right things, but that must include looking at how all of the code components work together and not just at pieces of code. Finally, it doesn't hurt to [develop a manual testing mindset](/blog/software-test-at-gitlab/).\n\n## Make it modern\n\nIn fact, a manual testing mindset, where test designers create tests that actually mimic what real users do, is a key underpinning of modern software testing in DevOps. Testers need to consider getting certified, embracing new technologies like AI, and, perhaps most importantly, be [evangelists for quality](/blog/how-to-leverage-modern-software-testing-skills-in-devops/) on a DevOps team.\n\n## Make automation work harder\n\nSoftware testing may be the most annoying DevOps step, but there's no doubt that automating the process makes everything work more smoothly. Teams with test automation [have fewer complaints about release delays](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/). And teams that have taken it up a notch and added AI/ML into their test automation process are even more upbeat about testing. After all, bots [don't need to take a lunch break or a vacation](/blog/the-software-testing-life-cycle-in-2021-a-more-upbeat-outlook/). Finally, if automation is well thought out, QA and developers can [actually work together to get code out the door](/blog/what-blocks-faster-code-release/).\n\n## Test for everything\n\nFor all the developer finger-pointing around software testing, it's also clear from our surveys that _more_ testing – of everything – has to happen. 
When considering how to modernize a software testing strategy, don't forget that \"nice to haves\" like [accessibility testing](/blog/introducing-accessibility-testing-in-gitlab/) aren't actually optional but critical for success.\n\nAnd also don't overlook the potential of newer test techniques like [fuzzing](/blog/why-continuous-fuzzing/), which can work with [Go](/blog/how-to-fuzz-go/), [Rust](/blog/how-to-fuzz-rust-code/), and other languages, and take testing into places other methodologies cannot.\n\n## The bottom line\n\nTesting doesn't have to be the enemy of speedy releases or the object of so much frustration. Start fresh with a modern software testing approach and make it easy for teams to get the most out of QA.\n",[1158,9,1040],{"slug":7790,"featured":6,"template":686},"the-gitlab-guide-to-modern-software-testing","content:en-us:blog:the-gitlab-guide-to-modern-software-testing.yml","The Gitlab Guide To Modern Software Testing","en-us/blog/the-gitlab-guide-to-modern-software-testing.yml","en-us/blog/the-gitlab-guide-to-modern-software-testing",{"_path":7796,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7797,"content":7803,"config":7809,"_id":7811,"_type":14,"title":7812,"_source":16,"_file":7813,"_stem":7814,"_extension":19},"/en-us/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers",{"title":7798,"description":7799,"ogTitle":7798,"ogDescription":7799,"noIndex":6,"ogImage":7800,"ogUrl":7801,"ogSiteName":670,"ogType":671,"canonicalUrls":7801,"schema":7802},"The GitLab Quarterly: How our latest beta releases support developers","The Value Streams Dashboard and Remote Development provide the capabilities needed to support DevSecOps teams and stay competitive.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668367/Blog/Hero%20Images/innovation-unsplash.jpg","https://about.gitlab.com/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The GitLab Quarterly: How our latest beta releases support developers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dave Steer\"}],\n        \"datePublished\": \"2023-01-24\",\n      }",{"title":7798,"description":7799,"authors":7804,"heroImage":7800,"date":7806,"body":7807,"category":769,"tags":7808},[7805],"Dave Steer","2023-01-24","\nIt’s easy to say that 2023 will be the year of innovation, but with the macroeconomic environment requiring an obsessive eye on cost efficiencies, and in some cases, cost-cutting, exactly how are organizations supposed to stay competitive when it comes to software development and delivery? The answer is clear: Stay focused on supporting your developers. Our two new beta releases help you do just that.\n\nThe GitLab Value Streams Dashboard, now available in private beta, ensures that all stakeholders have visibility, early and in real time, into the progress and value delivery metrics associated with software development and delivery. With everyone on the same page, discussions can be had and adjustments made before developers face obstacles or stall out waiting for decision-makers to get up to speed. Developers can also see, at-a-glance, their impact on the idea-to-customer value chain. The goal: Reduce idle time so that developers can spend more time developing and IT leaders can better unlock their transformation results. Keeping the creativity flowing can boost developer happiness and help provide a glide path for software to make its way into the market and add value. \n\nOur other beta release, GitLab Remote Development, can enable organizations to directly support developers by letting them establish an environment that best suits their needs, including where, when, and how they prefer to work. 
GitLab Remote Development doesn’t require developers to set up and manage local development environments, which keeps workflow distractions to a minimum. Stripping away location, device, and complex toolchain barriers can maximize developer satisfaction, which can lead to increased ingenuity and productivity.\n\nAn overarching aspect of this developer support is that it is available on a single DevSecOps platform so you don’t have to tack on something special to achieve these goals — the tools are all there and ready to be used to create better software faster.\n\nNow, let’s dig deeper into these capabilities and how they will help you support your developers and deliver value to your customers.\n\n## GitLab Value Streams Dashboard\n\nIn many conversations we have with customers, lack of visibility into metrics for software development value streams comes up as a pain point. Value streams – the process from idea to delivering customer value – should be the epicenter for understanding the progress, blockers, timelines, and costs associated with your development projects. Without this insight, innovation with an eye to cost efficiencies is virtually impossible. It is also difficult to properly support developers through fast, informed decision-making if everyone doesn’t have access to the same real-time data. \n\nThe GitLab Value Streams Dashboard gives stakeholders a bird's-eye view of their teams’ software delivery metrics (such as [DORA metrics](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html) and [flow metrics](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html)) for continuous improvement. DevSecOps teams can identify and fix inefficiencies and bottlenecks in their software delivery workflows, which can improve the overall productivity and stability of their development environment. \n\n> \"Our team is excited to try out the DORA metrics capabilities available in the private beta for the new Value Streams Dashboard. 
We look forward to using other widgets as the Value Streams Dashboard matures, which we hope will greatly improve our productivity and efficiency.\"  \n> _**Rob Fulwell, Staff Engineer, Conversica**_\n\nThe first iteration of the GitLab Value Streams Dashboard enables teams to continuously improve software delivery workflows by benchmarking key DevOps metrics to help improve productivity, efficiency, scalability, and performance. Tracking and comparing these metrics over a period of time helps teams catch downward trends early, drill down into individual projects/metrics, take remedial actions to maintain their software delivery performance, and track progress of their innovation investments.\n\nLeadership can support developers by using information from the dashboard to cross-pollinate and promote best practices, add resources to projects based on metrics, and eliminate common bottlenecks across projects. \n\n\n\n### Roadmap for Value Streams Dashboard\n\nWe are just getting started with delivering capabilities in our Value Streams Dashboard. The roadmap includes planned features and functionality that will continue to improve decision-making and operational efficiencies.\n\nHere are some of the capabilities we plan to focus on next:\n\n1. New visualizations such as overview widgets, [top view treemap](https://gitlab.com/gitlab-org/gitlab/-/issues/381306), and [DORA performance score chart](https://gitlab.com/gitlab-org/gitlab/-/issues/386843)\n2. Security and vulnerability benchmarking  to enable executives to better understand an organization’s security exposure \n3. A new [data warehouse](https://gitlab.com/groups/gitlab-org/-/epics/9318?_gl=1*1orel9k*_ga*ODExMTUxMDcwLjE2Njk3MDM3Njk.*_ga_ENFH3X7M5Y*MTY3MjkxMTgxMC43Ny4xLjE2NzI5MTI0MTIuMC4wLjA.) that supports fast analytical queries and deep data analysis\n4. 
Additional business value metrics such as adoption, OKRs, revenue, costs, CSAT that align technical and business goals\n\n[Learn more on our direction page](/direction/plan/value_stream_management/).\n\n### Join the beta: We welcome your contributions\n\nAs we iterate on this new offering, GitLab Premium and Ultimate customers are invited to [join our private beta](https://about.gitlab.com/value-streams-dashboard).\n\nWe also invite you to learn more about [Value Streams Dashboard](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html) and [follow along](https://gitlab.com/groups/gitlab-org/-/epics/9317) on the timeline to General Availability.\n\n## GitLab Remote Development\n\nThe increasing adoption of reproducible, ephemeral, cloud-based development environments has accelerated software development. But for developers, frequent context-switching between different environments, navigating complex and extensive toolchains, and managing a local development environment can create friction. GitLab Remote Development helps organizations better support developers by enabling them to spend less time managing their development environment and more time contributing high-quality code.\n\n> \"While a number of stakeholders are critical to successful DevOps, software developers are key for a successful DevOps implementation. Thus, organizations must adequately support developers. This means providing good developer experiences that are not disruptive or intrusive, but that are nonetheless sanctioned by the company, and that remain secure and compliant through automation and abstraction.\"  \n> _**Jay Lyman, 451 Research, a part of S&P Global Market Intelligence, \"Traditional IT teams, leadership stand out as additional DevOps stakeholders – Highlights from VotE: DevOps,\" January 4, 2023**_ \n\nThe centerpiece of GitLab Remote Development is our newly released Web IDE Beta, now the default web IDE experience on GitLab. 
The Web IDE makes it possible to securely connect to a remote development environment, run commands in an interactive terminal panel, and get real-time feedback from right inside the Web IDE. Understanding that developer familiarity is important, the Web IDE Beta uses a more powerful VS code interface and is able to handle many of the most frequently performed tasks on the existing Web IDE, including committing changes to multiple files and reviewing merge request diffs.\n\nGitLab Remote Development also creates a more secure development experience by enabling organizations to implement a [zero-trust policy](/blog/why-devops-and-zero-trust-go-together/) that prevents source code and sensitive data from being stored locally across numerous developer devices. In addition, organizations can adhere to compliance requirements by ensuring developers are working with approved environments, libraries, and dependencies. \n\nIt’s interesting to note that we deployed the Web IDE beta turned on as default and currently 99.9% of users have kept it toggled on. I encourage you to learn more about the [new Web IDE functionality](/blog/get-ready-for-new-gitlab-web-ide/) in our recent blog post. \n\n### Roadmap for Remote Development\n\nAs iteration continues on the GitLab remote development experience, the roadmap currently focuses on the following functionality next: \n\n1. Provision instances of remote development environments on demand in the customer’s choice of cloud provider.\n2. Allow teams to share complex, multi-repo environments.\n3. Connect from a variety of IDEs, including VS Code, JetBrains, Vim, or the Web IDE.\n4. 
Ensure an organization’s remote environments conform to its software supply chain security requirements with advanced security tools, authorization, reports, and audit logs.\n\n[Learn more on our direction page](/direction/create/ide/remote_development/).\n\n## Engage with DevSecOps experts\n\nWant to dig deeper into how to innovate while still keeping an eye on cost efficiencies? Join me for our webcast “[GitLab’s DevSecOps Innovations and Predictions for 2023](https://page.gitlab.com/webcast-gitlab-devsecops-innovations-predictions-2023.html?utm_medium=blog&utm_source=gitlab&utm_campaign=devopsgtm&utm_content=fy23q4release)” on Jan. 31 to get expert advice and insights about this era of DevSecOps transformation and the tools and strategies you’ll need to meet this challenge. \n\n[Register today](https://page.gitlab.com/webcast-gitlab-devsecops-innovations-predictions-2023.html?utm_medium=blog&utm_source=gitlab&utm_campaign=devopsgtm&utm_content=fy23q4release)!\n\n**Disclaimer**: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\n\n\n_Cover image by [Skye Studios](https://unsplash.com/@skyestudios?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)_\n  \n",[9,2243,1040,916],{"slug":7810,"featured":6,"template":686},"the-gitlab-quarterly-how-our-latest-beta-releases-support-developers","content:en-us:blog:the-gitlab-quarterly-how-our-latest-beta-releases-support-developers.yml","The Gitlab Quarterly How Our Latest Beta Releases Support Developers","en-us/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers.yml","en-us/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers",{"_path":7816,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7817,"content":7822,"config":7827,"_id":7829,"_type":14,"title":7830,"_source":16,"_file":7831,"_stem":7832,"_extension":19},"/en-us/blog/the-importance-of-compliance-in-devops",{"title":7818,"description":7819,"ogTitle":7818,"ogDescription":7819,"noIndex":6,"ogImage":7334,"ogUrl":7820,"ogSiteName":670,"ogType":671,"canonicalUrls":7820,"schema":7821},"The importance of compliance in DevOps","A basic understanding of what compliance means and how it impacts DevOps.","https://about.gitlab.com/blog/the-importance-of-compliance-in-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The importance of compliance in DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lauren Minning\"}],\n        \"datePublished\": \"2022-08-15\",\n      }",{"title":7818,"description":7819,"authors":7823,"heroImage":7334,"date":7824,"body":7825,"category":875,"tags":7826},[892],"2022-08-15","\n\nDevOps teams must develop secure software, but a key part of security is compliance. 
Achieving compliance can be time-consuming, stressful, and resource intensive, but it’s increasingly a job DevOps teams – and developers specifically – are being asked to bake into their processes. \n\nHere’s a look at how compliance in DevOps works.\n\n## It starts with standards\n\nOrganizations of all sizes rely on nationally or internationally recognized standards to prove their security postures to customers, partners, and shareholders. Companies need to create systems that streamline compliance with a potentially large number of standards, such as [NIST](https://www.nist.gov), [ISO](https://www.iso.org/home.html), [SLSA levels](https://slsa.dev/spec/v0.1/index), [GDPR](https://gdpr-info.eu), [SOX](https://en.wikipedia.org/wiki/Sarbanes–Oxley_Act), [SOC2](https://us.aicpa.org/interestareas/frc/assuranceadvisoryservices/aicpasoc2report), [PCI DSS](https://www.pcisecuritystandards.org), [HIPAA](https://www.cdc.gov/phlp/publications/topic/hipaa.html), and [HITECH](https://www.hhs.gov/hipaa/for-professionals/special-topics/hitech-act-enforcement-interim-final-rule/index.html). At GitLab, we know exactly how difficult this is as we went through the [SOC 2 compliance process](/blog/benefits-of-transparency-in-compliance/) ourselves, as well as many other compliance initiatives.\n\nPreviously, tackling compliance requirements involved spreadsheets, checklists, and cross-functional teams of people digging for data. Being certified compliant was critical to a business, but not critical enough to codify and streamline the process... 
and that was before the advent of the cloud where the data could literally be anywhere and everywhere.\n\n“It's incredibly difficult to know if you’ve done the right things to stay secure and compliant, especially in an increasingly complex environment of cloud-native applications, infrastructure-as-code, microservices, and more open source components,” explains Dave Steer, GitLab vice president of product and solutions marketing.\n\nThat's where automation, cooperation, and collaboration -- and DevOps -- come in.\n\n## Creating cohesion\n\nIt’s well known how developers and security pros have [struggled to find common ground](/blog/developer-security-divide/) around secure software development and compliance is one step further down an already rocky path of cooperation. But embedding compliance in DevOps can happen with the right mix of culture and technology. To start, it’s important to decide which standards apply to your organization and if compliance will be kept separate from security, or integrated as part of the same team. Either way, security and compliance work together by one feeding into the other. Compliance sets the parameters for meeting regulatory requirements and security executes the actions to meet those requirements. \n\nAnd that’s when the fun can really begin. The “beating heart” of DevOps is automation and if ever there is a process that is crying out to be automated and literally built into DevOps it’s compliance. There are three main ways DevOps teams can streamline the compliance process:\n\n- **Make compliance standards part of the CI/CD pipeline.** While this might not work for every compliance requirement, it eliminates the need for a manual checklist and provides a clear audit trail and a hard stop if there’s an issue because the pipeline will fail.\n\n- **Leverage containers.** When teams are certain a process or technology is compliant, it can be made into a container image. 
Over time, these “Golden Images” as [Martin Fowler refers to them](https://martinfowler.com/articles/devops-compliance.html) can be assembled as guiding lights of compliance.\n\n- **Establish a system of record, or SOR.** An SOR will allow a DevOps team to track compliance just before a change is made to the code or the process.\n\n## Is your software supply chain secure?\n\nAs we continue to navigate an always-evolving modern DevOps environment, it’s important to be aware that compliance and security are coming together under one primary theme moving forward: software supply chain security.\n\n[Software supply chain security](/blog/gitlab-supply-chain-security/) is fast becoming the compliance and security umbrella which is supported by security scanning, policy automation/guardrails, [securing the software factory itself](/blog/elite-team-strategies-to-secure-software-supply-chains/), and common controls embedded within the software factory. \n\nCombined with continuous maintenance of compliance and security regulations, automated DevOps practices have the potential to help discover security and compliance issues faster and address threats more quickly and effectively. \n\nIt's imperative that organizations understand how to comply with required regulations. 
Learn how GitLab helps organizations achieve [continuous compliance](/solutions/compliance/) and about our [software supply chain security direction](/direction/supply-chain/).\n",[9,875,1040],{"slug":7828,"featured":6,"template":686},"the-importance-of-compliance-in-devops","content:en-us:blog:the-importance-of-compliance-in-devops.yml","The Importance Of Compliance In Devops","en-us/blog/the-importance-of-compliance-in-devops.yml","en-us/blog/the-importance-of-compliance-in-devops",{"_path":7834,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7835,"content":7841,"config":7846,"_id":7848,"_type":14,"title":7849,"_source":16,"_file":7850,"_stem":7851,"_extension":19},"/en-us/blog/the-journey-to-a-devops-platform",{"title":7836,"description":7837,"ogTitle":7836,"ogDescription":7837,"noIndex":6,"ogImage":7838,"ogUrl":7839,"ogSiteName":670,"ogType":671,"canonicalUrls":7839,"schema":7840},"The journey to a DevOps Platform","Understand the history of DevOps or be doomed to repeat it. Here's why the journey has been so painful and how a DevOps Platform will help.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668107/Blog/Hero%20Images/global-developer-survey.png","https://about.gitlab.com/blog/the-journey-to-a-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The journey to a DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cormac Foster\"}],\n        \"datePublished\": \"2021-09-02\",\n      }",{"title":7836,"description":7837,"authors":7842,"heroImage":7838,"date":7843,"body":7844,"category":769,"tags":7845},[1134],"2021-09-02","\n\nIn a recent blog post [about the importance of a DevOps Platform](/blog/welcome-to-the-devops-platform-era/), GitLab CEO Sid Sijbrandij outlined four phases through which organizations frequently travel as their practice matures. 
It’s a painful journey we see again and again when we meet new customers. It spans every industry and every company size, and it’s the most mature DevOps teams with the most at stake who’ve felt the most pain. \n\nHistorically, if you wanted DevOps to work, you had to be prepared to pay for it. Just managing the backbone of DevOps – the toolchain -– has been a grind. Your “Jenkins Team,” your “GitHub Team,” or even, as one of our customers described, your “Duct Tape Team” (designed to hold it all together and patch holes), added no end value beyond keeping everything from falling apart. That’s a lot of investment to keep the lights on.\n\nIt’s a hard commitment to swallow, and the truth of it is that you shouldn’t have had to. A big part of the problems behind many “low-performing DevOps teams” stems from a poor set of tools for the job. Broadly put, on behalf of the DevOps tool industry: It’s not you, it’s us. The industry created many of these problems because we were thinking small and building to match.\n\n\nAs a philosophy, DevOps is pretty new, and it’s evolved very quickly. That rapid evolution has meant tremendous transformational opportunity, but building for the present left many tools, and the processes behind them, obsolete as soon as they hit the market. \n\nDevOps toolmakers have long been focused on solving discrete, easily understood problems (“BYO DevOps” in Sid’s blog), while DevOps has always aimed at solving bigger problems and looked to a more collaborative, productive transformation. You knew that when you tried to calm the chaos by implementing standards (BIC DevOps). You knew that when you tried to Frankenstack those tools into a servant of your larger ambitions with DIY DevOps integrations. But in the end, tools were creating almost as much work as they automated.\n\nIt makes sense when requirements are evolving so quickly. 
In 2011, when GitLab offered just a repository and issues, we couldn’t have foreseen [Design Management](https://docs.gitlab.com/ee/user/project/issues/design_management.html) or [ML Ops](/handbook/engineering/incubation/mlops/), but ten years later, they’re a key components of a movement toward a DevOps Platform for everyone. And that’s the point of the DevOps Platform Era (Phase 4). We’ve iterated our way to a place where we can replace blockers with enablement, and \\*support\\* your efforts instead of increasing your burden.\n\n**[Stop paying the “DevOps tax” by moving to a DevOps Platform. [Here’s how](/topics/devops/use-devops-platform-to-avoid-devops-tax/)]**\n\nThis isn’t unexpected. Every technology reaches this inflection point as it evolves. In the not-too-distant-past, customer relationship management (CRM) required a portfolio of sales force automation and marketing automation tools, commerce engines, app servers, analytics engines, and huge amounts of data integration to make it work. Now we have SaaS-based CRM solutions with a single monthly fee.\n\nWhile GitLab has always focused on delivering a DevOps Platform as a single application, we're excited to see the industry as a whole shift to a platform mindset. Late last year, Gartner released its vision of the [DevOps Value Stream Delivery Platform](/solutions/value-stream-management/) in a new [Market Guide](https://page.gitlab.com/resources-report-gartner-market-guide-vsdp/), in which we’re happy to be a representative vendor, and we’re excited to watch their coverage grow.\n\n**[Make [the most out of your DevOps platform](/topics/devops/seven-tips-to-get-the-most-out-of-your-devops-platform/)]**\n\nWe’re also excited to hear how a DevOps Platform benefits our customers in concrete ways. In our [2021 DevSecOps Survey](/developer-survey/), respondents told us a DevOps Platform resulted in better DevOps, improved collaboration, easier automation and expanded visibility and traceability. 
Or, as one survey taker said, a DevOps Platform “gives us reduced mean time to recovery (MTTR), quicker time to market, reduced lead time for fixes, and fewer change failures.”\n\nDevOps hasn’t stopped evolving, and neither have we, but we’ve reached the point where we know how the pieces need to work together, and we’ve built a platform to support it. To see for yourself, [try GitLab Ultimate for free](/free-trial/)!\n\n## Read more about the DevOps Platform:\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Making the case for a DevOps platform: What data and customers say](/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)\n\n- [It's time to build more accessible software. A DevOps platform can help](/blog/how-the-devops-platform-makes-building-accessible-software-easier/)\n\n",[9,793,916],{"slug":7847,"featured":6,"template":686},"the-journey-to-a-devops-platform","content:en-us:blog:the-journey-to-a-devops-platform.yml","The Journey To A Devops Platform","en-us/blog/the-journey-to-a-devops-platform.yml","en-us/blog/the-journey-to-a-devops-platform",{"_path":7853,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7854,"content":7860,"config":7865,"_id":7867,"_type":14,"title":7868,"_source":16,"_file":7869,"_stem":7870,"_extension":19},"/en-us/blog/the-kubecon-summary-from-a-product-perspective",{"title":7855,"description":7856,"ogTitle":7855,"ogDescription":7856,"noIndex":6,"ogImage":7857,"ogUrl":7858,"ogSiteName":670,"ogType":671,"canonicalUrls":7858,"schema":7859},"How what we learned at KubeCon EU 2022 will impact our product roadmaps","Platform integrations and secrets management are among our product team's primary takeaways. 
Find out why.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097776/Blog/Hero%20Images/Blog/Hero%20Images/2_2.png_1750097776369.png","https://about.gitlab.com/blog/the-kubecon-summary-from-a-product-perspective","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How what we learned at KubeCon EU 2022 will impact our product roadmaps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-05-31\",\n      }",{"title":7855,"description":7856,"authors":7861,"heroImage":7857,"date":7862,"body":7863,"category":769,"tags":7864},[1356],"2022-05-31","\nAfter two years of only virtual KubeCon events, the GitLab product team was excited to participate in and meet colleagues, partners, and more from our industry at KubeCon EU 2022, held in Valencia, Spain. We were present with four product leaders, a software developer, and a UX researcher. This post summarizes our primary takeaways from the conference, an experience that will affect our roadmaps.\n\nWe will discuss the following topics:\n\n- Internal platforms and GitOps\n- Secrets management\n- Infrastructure integrations\n- WebAssembly a.k.a. WASM\n\nThere were 32 topic types and several 0-day events at KubeCon. Many talks focused on a few tools. Many Cloud Native Computing Foundation ([CNCF](https://www.cncf.io/)) projects had their community meetings during these days. Some talks were given IRL, and others were broadcast virtually with live Q&A. There were a variety of topics and approaches. There were many talks about the various aspects of cluster management, too. However, we left this topic out on purpose because at GitLab we want to focus on the software developers and provide one DevOps platform to support their work. Cluster management is one step away from this focus. 
Still, we noticed some remarkable patterns as highlighted by the four elements of our list.\n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n## Internal platforms and GitOps\n\nCompanies want their developers to focus on their core business. They create internal platforms to hide the complexity of Day 0-2 operations from their software engineers and still allow the \"shift left\" movement of DevOps. These platforms often involve the welding of several tools.\n\nMany talks presented how the given team or company approached their platform problem and what tools they used, and one could often feel the 18-month sweat of a whole platform team trying to come up with a solution.\n\nThese platforms use either a push- or pull-based model for deployments. No single approach is emerging due to legacy applications and different requirements. While there is a definition of GitOps provided by the [OpenGitOps](https://opengitops.dev/) initiative, several presenters offered their own definitions, including of pull-based deployments.\n\nWe fielded a large-scale survey related to secrets at KubeCon, and learned that users would like help with the [Pipeline Authoring](/direction/verify/pipeline_composition/) workflow.\n\nBesides the wiring of the tools, the industry is still looking for a unified approach to multi-tenancy (there might not be one), and sometimes integrating security processes seems overly challenging.\n\n### How does this affect our roadmap?\n\nThere is a lot of potential in building a platform used as the starting point for internal platforms. Imagine a \"tool\" that shortens the time required to create an internal platform to days or weeks instead of a whole year. 
This is the GitLab vision of The One DevOps platform.\n\nAs a result, we don't plan any changes in our direction. We will continue investing in the recently started [Deployment direction](/direction/delivery/) to provide all the building blocks for a platform in a single tool and are already actively looking for integrated experiences across our offering.\n\nWe’re working on a CI/CD Component Catalog that includes CI templates. This will [support the Pipeline Authoring workflow](https://gitlab.com/groups/gitlab-org/-/epics/7462).\n\n## Secrets management\n\nOne of the things that often came up in our discussions is secrets management. We fielded a large-scale survey related to secrets at KubeCon, and attendees were glad that we’re thinking about this topic. Security is part of the DevOps discussion, and secrets management is a serious issue, especially in a cloud-native aspect.\n\n- Jenkins, GitHub and GitLab were all mentioned during the secret management discussions.\n- Users would like to offload the secrets management responsibility to another product. In many cases, their security requirements are strict, so they don't want/can't handle secrets by themselves.\n- Hashicorp Vault is a preferred tool (primarily in large enterprise companies working in finance or government) to manage and handle secrets. 
At the same time, most companies would like to avoid operating one more application in their stack.\n- Open ID Connect [OIDC](https://docs.gitlab.com/ee/integration/openid_connect_provider.html) with the JSON web token (JWT) is an essential direction for us.\n\n### How does this affect our roadmap?\n\nWe should invest more in secrets management since this is a pain our customers would like us to solve, and it's becoming a nonstarter feature for many organizations.\n\nWe want to advance in three main vectors:\n\n- Improve our existing secrets management solution - although we don't have a clear solution, we should improve our current variables capabilities to include additional features that could help users leverage variables for secrets. So it would be a \"good enough\" feature they can use. We are actively working toward this direction by removing some of the limitations we have around [variables and masking](https://gitlab.com/groups/gitlab-org/-/epics/1994).\n- Improve our existing [Hashicorp Vault integration](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/) using the JWT token, allowing us to integrate with additional vendors (AWS, AZURE, GCP). Like the previous point, we are moving toward this direction by supporting OIDC and [adding audience claims to our JWT token](https://gitlab.com/groups/gitlab-org/-/epics/7335).\n- We need to develop [a clear strategy for a built-in secrets management solution](/direction/govern/pipeline_security/secrets_management/#next-9-12-monhts). In order to provide our users/customers with choice, GitLab wants to use Hashicorp Vault for secrets management handling. We believe that our approach should be not to build the logic ourselves but to leverage an open source, [cloud native](/topics/cloud-native/) project that we could build into GitLab.\n\n## Infrastructure integrations\n\nInfrastructure integrations came in several flavors during the talks. 
Some are about cluster management, that is not our focus in this blog. Several presentations show that internal platforms need a strong infrastructure aspect, too. When a new project/microservice is started, it might require a new namespace in the cluster with associated RBAC and policies, optionally storage, a source code management repo with automation, and the appropriate permissions. Deployments might create ephemeral environments or could modify the underlying environment within predefined constraints.\n\nThe top tools mentioned in this area are:\n\n- Terraform\n- Crossplane\n- Pulumi\n\n### How does this affect our roadmap?\n\nGitLab already has [great integrations for Terraform](https://docs.gitlab.com/ee/user/infrastructure/iac/), and the other tools are on our radar, too.\n\nWe are open to integrations but cannot currently prioritize the other integrations on our own. We hope that the community will be interested in contributing to benefit everyone.\n\nBuilding Docker containers might not be necessary to get easy-to-manage container binaries. WASM runtimes become available for Kubernetes, and many programming languages can natively compile to WASM. WASM can provide a secure runtime environment without Docker and might be able to simplify the toolchain developers need to learn.\n\nWe don't plan to add direct WASM support to GitLab yet. The generic package registry can hold WASM modules while their deployment is up to the user.\n\nAt the same time, we see a lot of potential in simple runtime environments built around WASM. While GitLab is not in the business of offering runtime services, we will be actively monitoring the market. 
We might look into more WASM integrations as we see more demand and tools and services maturing in this space.\n\n## GitLab feedback\n\nIt's great to work on a product where the overall sentiment is positive, both from customers that intensely rely on it and from attendees that have to use other tools but would love to use GitLab or just started to play with it recently.\n\nWe received the following notable mentions as feedback:\n\n- Stability and reliability improved over the last several months.\n- Users love our documentation (primarily around CI) - they mentioned it's easy to use and get started with.\n- Given the size of GitLab and the number of our users, we received feedback about long-outstanding issues. We were happy to respond that we are addressing at least some of them shortly.\n- Several customers had asked if we got some resources for migrating from Jenkins to GitLab.\n- A few customers mentioned that they had to move away from GitLab mainly because of an upper-level decision despite favouring GitLab.\n\n## Conclusions\n\n![The GitLab team](https://about.gitlab.com/images/blogimages/kubecon-gitlab-team.jpg)\n\nWe enjoyed all the talks and were delighted to meet and speak with our users and customers. Thanks to all of you, we could \"feel the pulse\" on how we are doing and validate our direction.\n\nWe hope that this blog will guide those who could not [attend KubeCon](https://about.gitlab.com/events/kubecon/) and serve as a summary for those who did attend. All the recordings will likely be available on YouTube from Jun 6, 2022.\n\nLet us know in the comments if you think we missed some important direction.\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality.\nIt is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n",[1477,976,977,534,1041,9],{"slug":7866,"featured":6,"template":686},"the-kubecon-summary-from-a-product-perspective","content:en-us:blog:the-kubecon-summary-from-a-product-perspective.yml","The Kubecon Summary From A Product Perspective","en-us/blog/the-kubecon-summary-from-a-product-perspective.yml","en-us/blog/the-kubecon-summary-from-a-product-perspective",{"_path":7872,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7873,"content":7879,"config":7884,"_id":7886,"_type":14,"title":7887,"_source":16,"_file":7888,"_stem":7889,"_extension":19},"/en-us/blog/the-road-to-smarter-code-reviewer-recommendations",{"title":7874,"description":7875,"ogTitle":7874,"ogDescription":7875,"noIndex":6,"ogImage":7876,"ogUrl":7877,"ogSiteName":670,"ogType":671,"canonicalUrls":7877,"schema":7878},"The road to smarter code reviewer recommendations","Machine learning is coming to GitLab's code review process. 
Here's what you need to know, and how you can help!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668426/Blog/Hero%20Images/retrospectivesgitlabpost.jpg","https://about.gitlab.com/blog/the-road-to-smarter-code-reviewer-recommendations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The road to smarter code reviewer recommendations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-01-04\",\n      }",{"title":7874,"description":7875,"authors":7880,"heroImage":7876,"date":7881,"body":7882,"category":769,"tags":7883},[2862],"2022-01-04","\nYou may recall back in June 2021, we [announced the acquisition of UnReview](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities/), a machine learning (ML) based solution for automatically identifying appropriate expert [code reviewers](/stages-devops-lifecycle/create/) and controlling review workloads and distribution of knowledge.\n\nAt the start of the new year we wanted to provide an update on our integration progress and our wider vision of leveraging machine learning to make GitLab's [DevOps Platform](/solutions/devops-platform/) smarter. You see, the acquisition of UnReview also was the initial staffing of [our new ModelOps stage](/direction/modelops/).\n\n### Our Newest DevOps Stage\n\nThis new stage, which we’ve named ModelOps, is focused on enabling and empowering data science workloads on GitLab. GitLab ModelOps aims to bring data science into GitLab both within existing features to make them smarter and more intelligent, but also empowering GitLab customers to build and integrate data science workloads within GitLab.\n\nSo what is ModelOps you may wonder? We view ModelOps as an all encompassing term to cover the entire end to end lifecycle of artificial intelligence models. 
We wanted to set our vision wide to fully cover everything needed to power data science workloads. DataOps is the processing of data workloads (think traditional ELT: extract, load, transform) and MLOps is the building, training, and deployment of machine learning models. If you’re confused don’t worry, it’s a lot to wrap your head around.\n\n![a look at the stages of MLOps](https://about.gitlab.com/images/blogimages/MLops.png){: .shadow.small.center}\n\nToday our DevOps Platform helps plan, build, test, secure, deploy, and monitor traditional software. Now we want to extend our DevOps Platform to include AI and ML workloads. If this is interesting to you, be sure to check out our recent Contribute talk where we dive deeper into plans for our ModelOps stage.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/C08QVI99JLo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### UnReview as our first feature\n\nSo what does this have to do with UnReview? Our acquisition of UnReview is going to be our first [AI Assisted](/direction/ai-powered/) group’s feature: suggested reviewers within [GitLab’s existing reviewers experience](/blog/merge-request-reviewers/). Today, a developer in a merge request has to manually choose a reviewer to look at their code. With UnReview we can leverage the contribution history for a project and recommend someone well-suited for code review of your specific changes.\n\nHere’s an early mockup (and it may differ from our final UI) of how we’re thinking about this integration:\n\n![an early mockup of our UI](https://about.gitlab.com/images/blogimages/codereviewmockup.png){: .shadow.small.left}\n\nThe UnReview algorithm looks at a variety of data points from your project’s contribution history to suggest an appropriate reviewer. 
We’re still in the early days of this integration but our initial internal testing shows great suggestions.\n\n### Customer beta coming soon!\n\nThis leads me to a final question, might you want to be one of our first customers to try this new code review experience? In early 2022, we’ll begin a private customer beta of this new functionality. If interested, [fill out this form to express interest](https://docs.google.com/forms/d/e/1FAIpQLScpmCwpwyBr0GrXxBQ6vE02eokclFAs9lFk_g5dcyuGaHqFuQ/viewform). Do note that we can’t accept everyone and we’ll focus initially on customer profiles that are well suited for the initial version of the suggestion algorithm. Our only ask is we’d like to find customers with active projects that have a healthy number of contributors. The model currently works best on larger repositories with lots of contributors where it may not immediately be clear who is an ideal code reviewer.\n\nWe can’t wait for customers to begin using this new reviewer suggestion experience and will be providing more updates in early 2022.\n",[9,231,683,1181],{"slug":7885,"featured":6,"template":686},"the-road-to-smarter-code-reviewer-recommendations","content:en-us:blog:the-road-to-smarter-code-reviewer-recommendations.yml","The Road To Smarter Code Reviewer Recommendations","en-us/blog/the-road-to-smarter-code-reviewer-recommendations.yml","en-us/blog/the-road-to-smarter-code-reviewer-recommendations",{"_path":7891,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7892,"content":7897,"config":7902,"_id":7904,"_type":14,"title":7905,"_source":16,"_file":7906,"_stem":7907,"_extension":19},"/en-us/blog/the-software-testing-life-cycle-in-2021-a-more-upbeat-outlook",{"title":7893,"description":7894,"ogTitle":7893,"ogDescription":7894,"noIndex":6,"ogImage":4161,"ogUrl":7895,"ogSiteName":670,"ogType":671,"canonicalUrls":7895,"schema":7896},"The software testing life cycle in 2021: A more upbeat outlook","When DevOps teams trip, it's almost always over software 
testing. But in our 2021 survey we found some signs the software testing life cycle might finally be moving forward.","https://about.gitlab.com/blog/the-software-testing-life-cycle-in-2021-a-more-upbeat-outlook","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The software testing life cycle in 2021: A more upbeat outlook\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-05-06\",\n      }",{"title":7893,"description":7894,"authors":7898,"heroImage":4161,"date":7899,"body":7900,"category":679,"tags":7901},[851],"2021-05-06","\nOur [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals.\n\nThe software testing life cycle can feel like the [DevOps](/topics/devops/) punching bag, and for good reason: For the last three years, [our annual survey participants have unanimously named/blamed test as the number one reason for release delays](/blog/what-blocks-faster-code-release/). In our latest survey, participants had some very pointed commentary about the software test life cycle:\n\n> \"Testing can be both slow in writing and running.\"\n>\n> \"Testing is not yet fully automated in the deployment cycle; hoping to improve that with our move from BitBucket + Jenkins/drone to GitLab.\"\n>\n> \"Testing delays everything.\"\n>\n> \"Some software delivery teams have delegated their testing to QA personnel instead of writing comprehensive end-to-end testing suites. These teams suffer from very long (several days) bottlenecks in shipping to production.\"\n\nBut for all the complaints, our [2021 Global DevSecOps Survey](/developer-survey/) did find some signs that the software test life cycle, like many other components of DevOps, is beginning to mature. 
For starters, almost 25% of survey respondents said they’ve achieved [full test automation](https://www.softwaretestinghelp.com/automation-testing-tutorial-1/), more than double the number reported last year. And 28% said their teams are at least halfway to full test automation.\n\n## Changing roles\n\n[In our 2020 survey](/developer-survey/previous/2020/) we found DevOps roles are changing, and this year that pattern seems to be continuing, even in testing. Roughly 34% of participants said devs are testing their own code (up from 31% last year) and 32% said code is tested as it’s written, a significant bump from 25% last year.\n\nAt the same time though, when we asked devs what they need to be **doing more of** the vast majority of responses mentioned testing, whether it was pen, smoke, A/B, manual or simply test automation. For all the forward momentum, 25% of teams are either just beginning to consider test automation or have none at all. \n\nAn improving picture, but testing is simply irritating to some of our respondents:\n\n> \"Automated testing is ignored ‘due to time constraints.’\"\n>\n> \"Testing? That's an interesting idea.\"\n>\n> \"We intended to do test-driven development (TDD) but it usually ends up being after the fact.\"\n>\n> \"I try to write my code with TDD when it's possible; it's complicated when writing React components, or when changing a function that is not tested with many side effects and many inputs and the tech lead forbids (me) to refactor it at the moment .... ='(.\"\n\n## A potential game-changer\n\nAlthough it sounds like *Space Odyssey* meets DevOps, there is another reason for optimism around software testing: [Artificial intelligence/machine learning](/blog/ai-in-software-development/) is on the rise now and what could be more perfect than bots running endless tests? Bots can be deployed in the thousands and they don’t take vacations, or even lunch breaks. 
\n\nThe appeal of endless testing was clear in [our survey responses this year](/developer-survey/). Just over 41% of respondents told us bots were testing their code and/or [AI/ML](/blog/ai-in-software-development/) was reviewing code before human intervention. That’s up dramatically from just 16% last year. All told, 25% of respondents use bots to test their code, 16% use AI/ML to review code before a human sees it, and 34% are exploring the idea of AI/Ml but haven’t done anything about it yet. Exactly one-quarter of respondents aren’t using AI/ML in test.\n\nSoftware testing is just one small part of what DevSecOps Survey covers. Our [2022 Global DevSecOps Survey](/developer-survey/) has the latest insights from over 5,000 DevOps professionals.\n",[681,9,875],{"slug":7903,"featured":6,"template":686},"the-software-testing-life-cycle-in-2021-a-more-upbeat-outlook","content:en-us:blog:the-software-testing-life-cycle-in-2021-a-more-upbeat-outlook.yml","The Software Testing Life Cycle In 2021 A More Upbeat Outlook","en-us/blog/the-software-testing-life-cycle-in-2021-a-more-upbeat-outlook.yml","en-us/blog/the-software-testing-life-cycle-in-2021-a-more-upbeat-outlook",{"_path":7909,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7910,"content":7916,"config":7921,"_id":7923,"_type":14,"title":7924,"_source":16,"_file":7925,"_stem":7926,"_extension":19},"/en-us/blog/the-top-skills-you-need-to-get-your-devops-dream-job",{"title":7911,"description":7912,"ogTitle":7911,"ogDescription":7912,"noIndex":6,"ogImage":7913,"ogUrl":7914,"ogSiteName":670,"ogType":671,"canonicalUrls":7914,"schema":7915},"The top skills you need to get your DevOps dream job or a higher salary","AI, ML, automation – time to learn these new tech skills to stay competitive and land the job or promotion you 
want.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664025/Blog/Hero%20Images/devopscareer.jpg","https://about.gitlab.com/blog/the-top-skills-you-need-to-get-your-devops-dream-job","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The top skills you need to get your DevOps dream job or a higher salary\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2021-11-17\",\n      }",{"title":7911,"description":7912,"authors":7917,"heroImage":7913,"date":7918,"body":7919,"category":679,"tags":7920},[810],"2021-11-17","\n_Our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nIf you’re looking to transform your job, [your salary](/blog/four-tips-to-increase-your-devops-salary/) and your ability to get a job with your dream company, there are some skills you need to add to your toolkit.\n\nDevOps is a rapidly changing field. Automation is booming. There’s an increasing focus on artificial intelligence (AI) and machine learning (ML), along with moving security to the left. And there’s a call to master an ever-growing list of programming languages. Face it, DevOps professionals need to be in a [constant learning mode](/blog/best-advice-for-your-devops-career-keep-on-learning/). If you’re picking up new expertise, you’re likely going to find yourself in a [coveted position](/blog/a-look-at-devops-salaries/) since companies are struggling to fill jobs with DevOps professionals who have the latest skills. \n\nSo what technologies should you consider adding to your toolbelt? Of course, you need to take stock of your own skill set, experiences and certifications, and compare all of that to what your company, or your dream company, might need. 
Here’s a helpful list of considerations.\n\n## Expand your programming languages\n\nSo when it comes to figuring out what programming languages you should know, it’s a lengthy list to cull through. What would most benefit your company? And what would benefit a potential employer?\n\nThe DevOps Institute noted in its [2021 Upskilling Enterprise DevOps Skills Report](https://info.devopsinstitute.com/2021-upskilling-report-download) that it’s smart for developers to make sure they don’t specialize in a single language. \n\nAccording to the [Stack Overflow survey](https://insights.stackoverflow.com/survey/2021), developers who are already working with other programming languages are most interested in learning [Python](/blog/beginner-guide-python-programming/), JavaScript and Go. And [Brendan O’Leary](https://gitlab.com/brendan), a staff developer evangelist, and product and engineering leader at GitLab, advised that developers should learn Go and [Rust](/blog/rust-programming-language/), which are both useful for building in memory safety.\n\nEven if you’re programming with a popular but common language like JavaScript or C++ currently, that doesn’t mean you can’t showcase other languages on your resume through contributions to open source projects or by [volunteering your coding time](https://www.donatecode.com).\n\n### Understand the role of automation\n\nThe DevOps Institute’s survey noted that automation tool knowledge is a “must-have.” And out of all the automation skills, the report listed the top five as continuous integration (78 percent), continuous delivery (77 percent), continuous deployment (72 percent), continuous operation and support (62 percent), and [DevSecOps](/topics/devsecops/) (56 percent). \n\nIf your current team’s process isn’t highly automated, don’t fear – there are lots of learning options to the rescue. 
A quick search on YouTube found more than [100 videos on continuous deployment](https://www.youtube.com/results?search_query=continuous+deployment), as just one example. Most large companies offer their own training tracks (and [we do too](/learn/)) and, of course, there are [lots of certification programs](/blog/best-advice-for-your-devops-career-keep-on-learning/) as well.\n\n### Bone up on other key DevOps skills\n\nThe third-highest ranked skill domain is technical skills, according to the DevOps Institute. It’s a broad category, but there are core technical skills, like having an understanding of cloud platforms, [CI/CD](/topics/ci-cd/) and monitoring, along with operating systems, containers, big data, data analysis and microservices that will be important to nearly any employer.\n\nIn our 2021 Global DevSecOps Survey, developers told us there were a lot of technologies they’d like to dig into, including GitOps, IoT/blockchain, cloud/cloud native, cross-platform development, low code, data science, Python and cryptography.\n\nThat tracks with what The DevOps Institute found; the top seven technologies that organizations plan to implement over the next two years include IT automation technology, Gigabit Wi-Fi networking, Internet of Things, virtual desktop infrastructure, converged/hyperconverged infrastructure, container technology and serverless computing. \n\n### Dig into security\n\nA developer who not only understands security but can write the tests and prioritize the fixes is going to be incredibly attractive to a DevOps team looking to shift security firmly to the left. Job swapping or shadowing the security team is one way to build this knowledge base. Finding the dev team’s [security champion](/blog/why-security-champions/) and doing what they do also works. 
Finally, there’s a practical and actionable podcast called [The Secure Developer](https://www.devseccon.com/the-secure-developer-podcast/) that offers advice from a wide variety of developer pros and security pros on how to up your security game.\n\n### Focus on AI and ML\n\nOur AI overlords are coming, so it’s best to be prepared. While we’re only sort of kidding, it’s completely clear that AI and ML are showing up in DevOps in a surprising variety of ways, including testing, analysis and monitoring. \n\nAI and ML are most likely to arrive first in the testing arena; our survey showed that 75 percent of teams are either using AI and ML or bots for testing and code review, or they’re planning to – up from 41 percent the year before. So that’s an obvious place to focus your energies. \n\n### Jump in and explore learning opportunities\n\nIt’s about continuous education. Whether your company offers you opportunities to earn new certifications and master new languages, or you have to DIY, you need to figure out a way to keep learning. Keep adding to your skill set and resume. \n\n“Continuing to educate yourself is critical,” said GitLab’s O’Leary. “There are always new technologies, new languages, new skills to be learned. Companies need someone who is flexible and can solve problems. Mastering new technologies is one of the more important things you can do for yourself.”\n\nCover image by [Green Chameleon](https://unsplash.com/@craftedbygc) on [Unsplash](https://unsplash.com).\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) has the latest insights from over 5,000 DevOps professionals. 
You can also compare it with [previous year surveys](/developer-survey/previous/)_\n",[813,9,681],{"slug":7922,"featured":6,"template":686},"the-top-skills-you-need-to-get-your-devops-dream-job","content:en-us:blog:the-top-skills-you-need-to-get-your-devops-dream-job.yml","The Top Skills You Need To Get Your Devops Dream Job","en-us/blog/the-top-skills-you-need-to-get-your-devops-dream-job.yml","en-us/blog/the-top-skills-you-need-to-get-your-devops-dream-job",{"_path":7928,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7929,"content":7934,"config":7939,"_id":7941,"_type":14,"title":7942,"_source":16,"_file":7943,"_stem":7944,"_extension":19},"/en-us/blog/the-top-software-developer-challenges-in-2022",{"title":7930,"description":7931,"ogTitle":7930,"ogDescription":7931,"noIndex":6,"ogImage":7838,"ogUrl":7932,"ogSiteName":670,"ogType":671,"canonicalUrls":7932,"schema":7933},"The top software developer challenges in 2022","From AI to hiring, security breaches and Covid, our 2022 Global DevSecOps Survey uncovered the top software developer challenges.","https://about.gitlab.com/blog/the-top-software-developer-challenges-in-2022","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The top software developer challenges in 2022\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-10-05\",\n      }",{"title":7930,"description":7931,"authors":7935,"heroImage":7838,"date":7936,"body":7937,"category":769,"tags":7938},[851],"2022-10-05","\nIn our 2022 Global DevSecOps Survey we asked developers about the most difficult parts of their jobs, a question that’s been answered in previous years with comments about tricky toolchain integrations, complex programming languages and business-side folks who \"just don’t get it.”\n\nBut apparently this year *we* didn’t get it: [More than 5,000 respondents](/developer-survey/) told us they 
were worried about the inability to hire and retain talent, the economy and the post-Covid world they’re expected to work in. They are also concerned about adoption of complex technologies such as artificial intelligence, 5G and edge computing, and the fear of/responsibility for security breaches and what that would mean to their organizations.\n\n(That sound you hear in the background is the shattering of the “devs are oblivious to business” stereotype.)\n\nObviously a tectonic shift in the developer role is underway.\n\n“Two massive waves are crashing against each other right now,” explains [Brendan O’Leary](/company/team/#brendan), staff developer evangelist at GitLab. “One wave is developers as kingmakers. We were ‘brought into the palace’ because every company needed to have software as its core competency and the pendulum swung toward developers. But the other wave is the massive correction in the market. These two things happening at the same time are putting a huge squeeze on businesses and developers.”\n\nA [longstanding shortage of software developers](https://www.forbes.com/sites/forbestechcouncil/2021/06/08/is-there-a-developer-shortage-yes-but-the-problem-is-more-complicated-than-it-looks/?sh=215d08f33b8e) has been made worse by macroeconomic conditions, but demand for software isn’t decreasing despite the market upheaval, O'Leary adds. The result is devs at the center of nearly all the most difficult challenges today, from [hiring](/blog/6-tips-to-make-software-developer-hiring-easier/) to [security breaches](/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment/) and new technologies. 
\n\nTo put it another way: “We can’t be flippant about any part of the job anymore,” he adds.\n\nHere’s a look at what is keeping developers up at night.\n\n### Security\n\nMore than 1,000 respondents said all of the issues around security make their jobs infinitely more difficult and complicated.\n\n- “(The hardest thing is to) keep it secure and keep it updated.”\n- “My challenge is keeping up with the latest tools and security for optimal performance and privacy.”\t\t\t\n- “I am trying to build applications that are secure and stable.”\n- “Cybersecurity attacks are the biggest challenge facing us today.”\n- “The hardest part of my job? Data security, data security, I repeat, data security.”\n\n### “The Covid effect”\n\nHundreds of survey takers pointed to the changes brought about by Covid, including remote/hybrid work, economic forces, \"The Great Resignation,” and a number of other things. One respondent called it “the Covid effect” and many stressed that this new way of working has made their fast-paced jobs harder.\n\n### Staffing\n\nHard to hire, hard to keep, hard to even find...that’s what survey takers said about the issue of staffing.\n\n- “The biggest challenge is finding sufficient coding staff.”\n- “The biggest challenge is to find people to fill the jobs.”\n- “We have experienced significant difficulty in finding and retaining qualified staff.”\n\n### New technologies\n\nWith all the other pressures on developers, even exciting new technologies can seem daunting. One respondent put it this way:\n\n_“4G, 5G, AI, Metaverse, virtual space - developers have to support all of this.”_\n\nMany, many others simply said: “Technology is rapidly changing.”\n\n## Bold new challenges\n\nThis is all a long way of saying there has perhaps never been more on developers’ plates. 
Two developer respondents summed it up well:\n\n_“We have a development capacity challenge, a recruiting challenge and a knowledge-sharing challenge.”_\n\n_“For me, these are the eight biggest challenges we are facing as software developers: 1) Keeping pace with innovation. 2) Cultural change. 3) Customer experience. 4) Data privacy. 5) Cybersecurity. 6) AI and automation. 7) Data literacy. 8) Cross-platform functionality.”_\n\nWhat do you see as the biggest challenges facing developers? Let us know in the comments field below.\n",[681,9,267],{"slug":7940,"featured":6,"template":686},"the-top-software-developer-challenges-in-2022","content:en-us:blog:the-top-software-developer-challenges-in-2022.yml","The Top Software Developer Challenges In 2022","en-us/blog/the-top-software-developer-challenges-in-2022.yml","en-us/blog/the-top-software-developer-challenges-in-2022",{"_path":7946,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7947,"content":7952,"config":7956,"_id":7958,"_type":14,"title":7959,"_source":16,"_file":7960,"_stem":7961,"_extension":19},"/en-us/blog/the-ultimate-guide-to-gitops-with-gitlab",{"title":7948,"description":7949,"ogTitle":7948,"ogDescription":7949,"noIndex":6,"ogImage":3500,"ogUrl":7950,"ogSiteName":670,"ogType":671,"canonicalUrls":7950,"schema":7951},"The ultimate guide to GitOps with GitLab","This eight-part tutorial series demonstrates how to use GitLab as a best-in-class GitOps tool.","https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The ultimate guide to GitOps with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-04-07\",\n      }",{"title":7948,"description":7949,"authors":7953,"heroImage":3500,"date":1786,"body":7954,"category":791,"tags":7955},[1356],"\n\nIt is possible to use GitLab as a best-in-class 
GitOps tool, and this blog post series is going to show you how. [GitOps](/topics/gitops/) is an operational framework that takes DevOps best practices used for application development such as version control, collaboration, compliance, and CI/CD tooling, and applies them to infrastructure automation. This series of easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them, that can be solved by pairing GitOps with GitLab.\n\nHere are 8 tutorials on how to do GitOps with GitLab:\n\n**1. [Here's how to do GitOps with GitLab](/blog/gitops-with-gitlab/)**\n\nThis tutorial sets the stage for what you will learn throughout the series, including the tech concepts you'll need to know.\n\n**2. [Infrastructure provisioning with GitLab and Terraform](/blog/gitops-with-gitlab-infrastructure-provisioning/)**\n\nThis tutorial walks you through setting up the underlying infrastructure using GitLab and Terraform.\n\n**3. [Connect with a Kubernetes cluster](/blog/gitops-with-gitlab-connecting-the-cluster/)**\n\nThis tutorial demonstrates how to connect a Kubernetes cluster with GitLab for pull- and push-based deployments and easy security integrations.\n\n**4. [How to tackle secrets management](/blog/gitops-with-gitlab-secrets-management/)**\n\nThis tutorial builds on the previous tutorial to show you how to use a Kubernetes cluster connection to manage secrets within a cluster.\n\n**5. [The CI/CD tunnel](/blog/gitops-with-gitlab-using-ci-cd/)**\n\nThis tutorial introduces you to CI/CD tunnels and shows step-by-step how to access a Kubernetes cluster using GitLab CI/CD.\n\n**6. [Connecting GitLab with a Kubernetes cluster - Auto DevOps](/blog/gitops-with-gitlab-auto-devops/)**\n\nThis tutorial looks at how you can use Auto DevOps with all its bells and whistles to easily manage deployments.\n\n**7. 
[Connecting GitLab with a Kubernetes cluster for GitOps-style application delivery](/blog/gitops-with-gitlab/)**\n\nThis tutorial shows you how to connect an application project to a manifest project for controlled, GitOps-style deployments.\n\n**8. [Turn a GitLab agent for Kubernetes installation to manage itself](/blog/gitops-with-gitlab-manage-the-agent/)**\n\nThis tutorial is the culmination of the previous tutorials and will teach you how to turn a GitLab agent for Kubernetes installation to manage itself.\n\n\n**Read more about GitOps:**\n- [GitLab for GitOps](/solutions/gitops/)\n- [What is GitOps](/topics/gitops/)\n- [GitOps viewed as part of the Ops evolution](/blog/gitops-as-the-evolution-of-operations/)\n- [How to use a push-based approach for GitOps with GitLab scripting and variables](/blog/how-to-agentless-gitops-vars/)\n\n\n\n\n",[9,978,534],{"slug":7957,"featured":6,"template":686},"the-ultimate-guide-to-gitops-with-gitlab","content:en-us:blog:the-ultimate-guide-to-gitops-with-gitlab.yml","The Ultimate Guide To Gitops With Gitlab","en-us/blog/the-ultimate-guide-to-gitops-with-gitlab.yml","en-us/blog/the-ultimate-guide-to-gitops-with-gitlab",{"_path":7963,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7964,"content":7969,"config":7973,"_id":7975,"_type":14,"title":7976,"_source":16,"_file":7977,"_stem":7978,"_extension":19},"/en-us/blog/the-ultimate-guide-to-software-supply-chain-security",{"title":7965,"description":7966,"ogTitle":7965,"ogDescription":7966,"noIndex":6,"ogImage":2507,"ogUrl":7967,"ogSiteName":670,"ogType":671,"canonicalUrls":7967,"schema":7968},"The ultimate guide to software supply chain security","Coupling DevSecOps with software supply chain security results in the advanced protection organizations need.","https://about.gitlab.com/blog/the-ultimate-guide-to-software-supply-chain-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The 
ultimate guide to software supply chain security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-08-30\",\n      }",{"title":7965,"description":7966,"authors":7970,"heroImage":2507,"date":2940,"body":7971,"category":769,"tags":7972},[1454],"\n\nThreats to the software supply chain are forcing a sea change in DevOps. Organizations are feeling internal pressure to embed security deep into their software development life cycles and external pressure to comply with numerous federal and industry mandates. What is emerging is a DevSecOps strategy that helps govern how code, applications, and infrastructure are protected across the software supply chain.\n\nThe pairing of DevSecOps with software supply chain security also ensures that, where possible, automation will be used to make processes repeatable, increasing security and reducing the opportunity for human error or malicious activity.   \n\nThis comprehensive guide provides deeper dives into all the aspects of software supply chain security so make sure to follow the embedded links.\n\n## The need for software supply chain security\n\nSecuring code is not a new concept. However, promoting security early on in the development life cycle is. The movement to shift security left has taken off, and “sec” is becoming part of the DevOps culture, morphing the concept wholly into DevSecOps. 
\n\nAlong with this evolution has been an increase in outside pressure – as formidable as [the federal government](/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security/) – to batten down software supply chains so that large attacks such as the [SolarWinds hack of 2020](/blog/what-the-solarwinds-attack-can-teach-us-about-devsecops/#a-brief-summary-of-the-solarwinds-incident) won’t threaten the nation’s critical infrastructure and cause unmitigated damage.\n\nEssentially, businesses must figure out how to meld their development, security, and operations teams internally while complying with numerous mandates from external organizations.\n\nLearn more about the key trends driving software supply chain security:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Tbiscg09-Ac\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Integrating sec into DevSecOps\n\nThe first step in securing the software supply chain is to create a cohesive DevSecOps approach to software development. 
In doing so, organizations can expand security in DevOps beyond basic tasks and better [understand myriad threat vectors](/blog/top-challenges-to-securing-the-software-supply-chain/).\n\n_[Security in the modern DevOps solution](/blog/are-you-ready-for-the-newest-era-of-devsecops/) goes beyond just shifting security features left to empower the developers to find and fix security flaws, but also provides end-to-end visibility and control over the entire SDLC to create, deliver, and run the applications._\n\nTeams that integrate security practices throughout their development process are 1.6 times more likely to meet or exceed their organizational goals, according to the Google Cloud DevOps Research and Assessment (DORA) “Accelerate State of DevOps 2021 Report”.\n\nSome [best practices elite DevSecOps teams use](/blog/elite-team-strategies-to-secure-software-supply-chains/) are:\n\n- Apply common controls for security and compliance\n- Automate common controls and CI/CD\n- Apply [zero-trust principles](/blog/why-devops-and-zero-trust-go-together/)\n- Inventory all tools and access, including infrastructure as code\n- Consider unconventional scale to find unconventional vulnerabilities\n- Secure containers and orchestrators\n\n## Understanding federal and industry mandates\n\nThe Biden administration has been singular in its demand that federal agencies and their vendors [make significant improvements in software supply chain security](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/).\n\nThat sense of urgency has trickled down to the standards bodies, including the National Institute of Standards and Technology (NIST) and its [Secure Software Development Framework](https://csrc.nist.gov/Projects/ssdf), the Cybersecurity and Infrastructure Agency’s work on [Software Bill of Materials standards](https://www.cisa.gov/sbom), and [SLSA](https://slsa.dev/), a cross-industry collaboration on a 
security framework to secure the supply chain.\n\nCompliance officers within organizations are looking to DevSecOps teams to make it easy for them to audit the development life cycle and attest to requirements in these mandates.\n\n## How a DevOps platform helps \n\nIn our [2022 Global DevSecOps survey](/developer-survey/), respondents overwhelmingly told us that secure software development is now an imperative for their organization and that they believe security is the top reason to deploy a DevOps platform. \n\nA DevOps platform can certainly help [protect against software supply chain attacks](/blog/devops-platform-supply-chain-attacks/). Here are some examples how:\n\n- End-to-end visibility and auditability: Who changed what, where, and when.\n\n- Consistent application and administration of policies: Both what policies are used where, and the actions taken for exceptions\n\n- More intelligent response through greater end-to-end context\n\n- Reduced attack surface of a simplified toolchain\n\nDevOps platforms can even support more sophisticated software supply chain security techniques such as [securing pipeline builds with code signing](/blog/secure-pipeline-with-single-sign-in/). Code signing is an area of interest to standards bodies setting requirements for protecting software supply chains.\n \n## GitLab’s strengths in software supply chain security\n\nGitLab has been at the leading edge of DevSecOps, helping organizations to evolve their security practices from traditional application testing.\n\nFor instance, rather than being performed by security pros, using their own tools, at the end of the development cycle, security testing is automated within the CI pipeline with findings delivered to developers while they are still iterating on their code. 
Read how GitLab is also [revolutionizing CI and security, and remediation practices](/blog/gitlab-is-setting-standard-for-devsecops/).\n\nGitLab is laser-focused on enabling organizations to establish and manage security and compliance guardrails that allow developers to run fast while also managing risk, including the introduction of [continuous compliance and policy engines](/blog/gitlabs-newest-continuous-compliance-features-bolster-software/), as well as [automated attestation](/blog/securing-the-software-supply-chain-through-automated-attestation/) and [SBOMs](/blog/the-ultimate-guide-to-sboms/).\n\nThe GitLab partner ecosystem helps the platform to meet even more security needs, including [generating SBOMs\nautomatically](/blog/gitlab-and-testify-sec-witness-alliance/) and [protecting software from malicious modules](/blog/terraform-as-part-of-software-supply-chain-part1-modules-and-providers/).\n\nMore on GitLab’s software supply chain security vision can be found [here](/blog/gitlab-supply-chain-security/). 
And learn even more about securing the software supply chain as GitLab Field CTO [Lee Faus](https://gitlab.com/lfaus) answers some burning questions:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/HubJIQ-x2EA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[875,9,749],{"slug":7974,"featured":6,"template":686},"the-ultimate-guide-to-software-supply-chain-security","content:en-us:blog:the-ultimate-guide-to-software-supply-chain-security.yml","The Ultimate Guide To Software Supply Chain Security","en-us/blog/the-ultimate-guide-to-software-supply-chain-security.yml","en-us/blog/the-ultimate-guide-to-software-supply-chain-security",{"_path":7980,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":7981,"content":7987,"config":7992,"_id":7994,"_type":14,"title":7995,"_source":16,"_file":7996,"_stem":7997,"_extension":19},"/en-us/blog/three-faces-of-user-calls",{"title":7982,"description":7983,"ogTitle":7982,"ogDescription":7983,"noIndex":6,"ogImage":7984,"ogUrl":7985,"ogSiteName":670,"ogType":671,"canonicalUrls":7985,"schema":7986},"How product managers can get more out of user calls","There are 3 types of user calls. 
Here's how GitLab product managers approach them and how we leverage our transparency value to better understand our users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682372/Blog/Hero%20Images/michal-czyz-ALM7RNZuDH8-unsplash.jpg","https://about.gitlab.com/blog/three-faces-of-user-calls","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How product managers can get more out of user calls\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-07-20\",\n      }",{"title":7982,"description":7983,"authors":7988,"heroImage":7984,"date":7989,"body":7990,"category":1180,"tags":7991},[1356],"2022-07-20","\n\nOne of the core jobs of product managers is to speak with users to better understand their needs, pain points and the context in which they operate and use our products. But not all user calls are the same. \n\nThere are 3 prominent types of user calls:\n\n- Discovery or problem validation calls\n- Roadmap discussions\n- Solution validation calls\n\nHere's an in-depth look at how we approach the three types of user calls at GitLab.\n\n## Discovery calls\n\nDiscovery or problem validation calls are product managers' most crucial conversations with users. Discovery calls are typically set up to learn about our users in a targeted way. These calls help build a better understanding of users' pain points. \n\nFor discovery, we need a recipe for repeatable, comparable user calls. For this reason, we should create an interview script and follow that script on all the user calls. This does not mean these calls are robotic and devoid of improvisation, not at all! The script should provide the backbone of the discussions. We can adjust it either during the call or in advance based on prior knowledge about the user. 
Good discovery calls typically take the form of a deep-dive conversation: we know the script by heart and can run back and forth around it, always asking the questions that fit the conversation. \n\nFinding the right users is one of the most challenging parts of discovery calls. Thankfully, with GitLab, this is relatively easy. We can always reach out to the most active users on issues and invite them to a call. Another technique I employ is to find users in the [Cloud Native Computing Foundation](https://www.cncf.io) and Kubernetes communities' Slack channels and articles on [Medium](https://medium.com). This way, I can also find non-GitLab users, a set of people likely more valuable to interview than existing users. Finally, we can recruit users with the support of the account managers. They are always helpful in connecting PMs with users. Asking the users about their needs shows them that we genuinely care about them.\n\nThere are at least two distinct discovery calls: PM-led or UX-led. UX research typically works on projects with a strict scope. For PM-driven calls, a great framework is [\"Continuous discovery\" calls by Teresa Torres](https://www.producttalk.org/continuous-discovery/). With continuous discovery, we build a deep understanding of our users and get well-understood opportunities. The technique allows us to get a broad view and to dive deep into specific aspects of our problem space when needed.\n\n## Roadmap discussions\n\nRoadmap discussion calls are typically initiated by sales or account management teams. Product managers are asked to join the prospect/customer call to strengthen our positions and show how much we care for the customer. \n\nTo prepare for roadmap discussions, PMs should have an effective way to present the roadmap. This typically happens in the form of slides. 
A diligent PM might even prepare something specifically for the client.\n\nDuring these calls, the user/customer/prospect will typically ask the questions, and the PMs respond. Our role in these calls is to represent the truth. We might be tempted to paint a rosier picture about the current or expected state of the product than is actually true, and we should avoid making time-bound promises.\n\nWhat are the expected outcomes of roadmap discussions? They can help strengthen our position with the user. Remember that these calls primarily cater to our customers/users and customer-facing teams. As such, they are unlikely to provide deep learning about our users. \n\nIf we approach these calls with the intention to prove that our roadmap is correct, we will likely fall victim to both response and confirmation biases. There are techniques to validate a roadmap, but they are more aligned with problem validation than roadmap discussion calls. For example, UX researchers should be able to help validate a roadmap as a UX research project.\n\n## Solution validation calls\n\nLast but not least, we have solution validation calls. These calls serve our learning but are way more focused than discovery calls. Solution validation calls require some form of a prototype for a specific problem we want to test and get feedback on from our users.\n\nAt GitLab, the prototypes are typically built by product design or engineering. The product manager might miss some of these calls in an empowered and autonomous team. But, as these calls are great learning experiences, we should aim to be there to support and learn if we can.\n\nA solution validation call might be started with a concise roadmap discussion. Unlike in sales calls, our aim is not to influence the user but to set the scene for solution validation. The central part of the call should be around the proposed solution. 
We should provide the least amount of guidance to our users since there are no humans available to direct our users when they are working with the actual product. If much guidance is required, that is a sign that we might want to rethink our UX approach.\n\nFinding suitable interview candidates for a solution validation call might be tricky. For GitLab, we often use the shortcut of inviting users based on their activity on relevant issues. Sometimes, when our issues provide enough context, we might get some solution validation asynchronously as users give their feedback directly in the issue.\n\n## How many calls?\n\nHow often does a good PM have all these calls? For discovery calls, I aim to have 3 calls per week. Above this, I don’t mind taking 1 sales call. While I prefer the product designer to run solution validation calls, I try to participate there too. Not every solution requires dedicated validation, so having a target number for solution validation calls is unrealistic. The better the discovery calls are, the fewer solution validation calls you might need. Still, even the best discovery cannot and should not answer all the questions of a solution validation. Often there are different (and totally valid) approaches to the same problem, and we need to pick the one that is the easiest for users to understand.\n\nI think we need to speak to our users every day. Working at GitLab, sometimes this might take the form of issue comments, but face-to-face calls are a must. In any case, during these discussions we should aim to learn from our users, not just answer their questions. A handy question in issues is to ask for more context from our users. The response might highlight unknown use cases or edge cases we missed previously.\n\n## Take the calls\n\nIt is helpful to remember all the user call types we practice as PMs. As mentioned, I think the most crucial user calls for PMs are the discovery calls. 
If we don’t make discovery calls, nobody will; also, PMs might not be needed in the other calls. That said, a product manager's job is to also help the business be viable. So we should be able to support sales and always have a deck ready for roadmap calls. Lastly, we should work continuously with our team on solution validation so that everyone is confident in our solution.\n\n",[9,793,1515],{"slug":7993,"featured":6,"template":686},"three-faces-of-user-calls","content:en-us:blog:three-faces-of-user-calls.yml","Three Faces Of User Calls","en-us/blog/three-faces-of-user-calls.yml","en-us/blog/three-faces-of-user-calls",{"_path":7999,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8000,"content":8006,"config":8010,"_id":8012,"_type":14,"title":8013,"_source":16,"_file":8014,"_stem":8015,"_extension":19},"/en-us/blog/three-steps-to-optimize-software-value-streams",{"title":8001,"description":8002,"ogTitle":8001,"ogDescription":8002,"noIndex":6,"ogImage":8003,"ogUrl":8004,"ogSiteName":670,"ogType":671,"canonicalUrls":8004,"schema":8005},"GitLab's 3 steps to optimizing software value streams","Discover the power of GitLab Value Streams Dashboard (VSD) for optimizing software delivery workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667893/Blog/Hero%20Images/workflow.jpg","https://about.gitlab.com/blog/three-steps-to-optimize-software-value-streams","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's 3 steps to optimizing software value streams\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2023-06-26\",\n      }",{"title":8001,"description":8002,"authors":8007,"heroImage":8003,"date":6940,"body":8008,"category":769,"tags":8009},[4146],"\n\n\u003Ci>This is part three of our multipart series introducing you to the capabilities within GitLab Value Stream Management and the Value Streams Dashboard. 
In part one, [learn about the Total Time Chart](https://about.gitlab.com/blog/value-stream-total-time-chart/) and how to simplify top-down optimization flow with Value Stream Management. In part two, learn how to [get started with the Value Streams Dashboard](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/). \u003C/i>\n\nIt’s no news that software development is a complex process that involves many different stages, teams, and tools. With significant investments made in digital transformation and adopting new tools following the shift to remote work, measuring and managing the business value of the software development lifecycle (SDLC) have become more complex.\n\nThis is where Value Stream Management (VSM) comes in. VSM is a methodology that helps organizations optimize their software delivery process by visualizing, measuring, and improving the flow of value (a.k.a. the “value stream”) from ideation to production. Some examples are: the amount of time it takes to go from an idea to production, the velocity of the project, bottlenecks in the development process, and long-running issues or merge requests. As you’ve probably guessed from its title, this blog will cover how the [new capabilities of GitLab Value Streams Dashboard](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/#value-streams-dashboard-is-now-generally-available) can help you do all that, and optimize your software delivery.\n\n## Value Stream Management in a nutshell \nGitLab [VSM](https://about.gitlab.com/solutions/value-stream-management/) provides end-to-end visibility into your software delivery process. It enables you to [map out your value stream](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#create-a-value-stream-with-custom-stages), identify bottlenecks, measure key metrics, and identify the places where you are either lagging or doing exceptionally well. It then also allows you to take action on these insights. 
In essence, GitLab VSM helps you to understand and optimize your development processes to deliver software faster and better.\n\n![GitLab Value Stream Analytics](https://about.gitlab.com/images/blogimages/2023-05-24-vsm-overview.png){: .shadow}\nWith Value Stream Analytics, you can establish a baseline for measuring software delivery performance progress and identifying the touchpoints in the process that do not add value to the customer or your business.\n{: .note.text-center}\n\nAnd if you’re wondering how GitLab VSM is able to do that, it’s because GitLab provides an entire DevSecOps platform as a single application and, therefore, holds all the data needed to provide end-to-end visibility throughout the entire SDLC. So now, your decisions rely on actual data rather than blind estimation or gut feelings. Additionally, since GitLab is the place where work happens, these insights are also actionable, allowing your users to move from “understanding” to “fixing” at any time, from within their workflow and without losing context.\n\n## How VSM works: The three-step analysis\nLet’s take a look at how GitLab VSM helps you optimize your SDLC in three easy steps:\n\n**Step 1:** Get an end-to-end view across your entire organization and pinpoint the value streams you need to focus on.\n\nThe [Value Streams Dashboard](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html) is a centralized view where you can see and compare all of the SDLC metrics of all your organization's projects. This dashboard enables you to identify hotspots in your SDLC streams — projects or teams that are underperforming, with longer stages and cycle times. It also shows you where you have the largest value contributors, so you can identify and learn what is working well and what's not. 
With this information at hand, you can now prioritize your efforts and understand where to spend your time.\n\n![VSM illustration](https://about.gitlab.com/images/blogimages/2023-05-24_vsm1.gif){: .shadow}\n\n\nThis centralized UI acts as a single source of truth for your organization, where all the relevant stakeholders can access, view, and analyze the same set of metrics. This ensures everyone is on the same page, promoting consistency in analysis and decision-making.\n\nRead more: [Getting started with the new GitLab Value Streams Dashboard](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/)\n\n**Step 2:** Drill down into a specific project.\n\nWhen you select a project from the main dashboard, you are directed to that project's Value Stream Analytics (VSA), where you see its value stream. The project's metrics are presented for each stage of the project, helping you understand where the main work lies and which stages need improvement. The VSA overview provides valuable insights into lead times, cycle times, and other critical metrics that help you identify areas for optimization.\n\n![VSM illustration](https://about.gitlab.com/images/blogimages/2023-05-24_vsm2.gif){: .shadow}\n\n\nRead more: [Value stream management: Total Time Chart simplifies top-down optimization flow](https://about.gitlab.com/blog/value-stream-total-time-chart/)\n\n**Step 3:** Dive deep into the Value Stream Analytics dashboard to analyze and fix issues.\n\nOnce the main areas of interest are identified, GitLab Value Stream Analytics (VSA) enables you to drill down further into a specific stage of the project. In the stage table, you can sort the **Last event** column to view the most recent workflow event, and sort the items by **duration** so you can rearrange the events and gain insights faster. This way, you can easily detect work items that are slowing down the project in that stage. 
Here's an example how we dogfood [VSA on gitlab-org](https://gitlab.com/gitlab-org/gitlab/-/value_stream_analytics). \n\nYou can identify the owner of the work items responsible for the delays, examine code changes, and perform a comprehensive analysis of the issue. This level of visibility and traceability empowers you to take targeted actions and make the necessary improvements to optimize the value stream, all within the context of your current workflow.\n\n![VSM illustration](https://about.gitlab.com/images/blogimages/2023-05-24_vsm3.gif){: .shadow}\nUse GitLab Value Stream Management to visualize the progress of work from planning to value delivery, and gain actionable context.\n{: .note.text-center}\n\n## The value of Value Stream Management\nGitLab VSM is a powerful solution that fits seamlessly into your SDLC. By providing end-to-end visibility and granular, actionable insights into the value stream, VSM enables you to optimize your software delivery and provide value to your customers faster. Access the information you need, when you need it — and easily act on it from within your workplace. VSM offers you the best of both worlds: out-of-the-box functionality and the ability to customize features.\n\nSay goodbye to time-consuming searches and hello to instant access to the information you need most. 
To learn more, check out the [Value Stream Analytics documentation](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html).\n\nTo help us improve the Value Stream Management, please share feedback about your experience in this [survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_50guMGNU2HhLeT4).\n",[855,9,916,1040,683],{"slug":8011,"featured":6,"template":686},"three-steps-to-optimize-software-value-streams","content:en-us:blog:three-steps-to-optimize-software-value-streams.yml","Three Steps To Optimize Software Value Streams","en-us/blog/three-steps-to-optimize-software-value-streams.yml","en-us/blog/three-steps-to-optimize-software-value-streams",{"_path":8017,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8018,"content":8023,"config":8028,"_id":8030,"_type":14,"title":8031,"_source":16,"_file":8032,"_stem":8033,"_extension":19},"/en-us/blog/three-things-you-might-not-know-about-gitlab-security",{"title":8019,"description":8020,"ogTitle":8019,"ogDescription":8020,"noIndex":6,"ogImage":5226,"ogUrl":8021,"ogSiteName":670,"ogType":671,"canonicalUrls":8021,"schema":8022},"Three things you might not know about GitLab security","There's so much more to GitLab's security offering than meets the eye. 
Here are three features you may have missed.","https://about.gitlab.com/blog/three-things-you-might-not-know-about-gitlab-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Three things you might not know about GitLab security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matt Wilson\"}],\n        \"datePublished\": \"2021-11-23\",\n      }",{"title":8019,"description":8020,"authors":8024,"heroImage":5226,"date":2921,"body":8026,"category":875,"tags":8027},[8025],"Matt Wilson","\n\nOver the past couple of years, our users have come to know and regularly use our many security features that are part of the [Secure](/stages-devops-lifecycle/secure/) and [Protect](/stages-devops-lifecycle/govern/) stages. We have seen success stories from customers who have improved their security postures by reducing vulnerabilities in application code. One thing that surprises me when I speak to our users is that many aren’t aware of some of our most useful features. Here are three things you really should know about GitLab’s capabilities that will help take your security game to the next level.\n\n## We have a GraphQL API!\n\nGitLab has long offered a [REST API](https://docs.gitlab.com/ee/api/api_resources.html). It is quite capable but when it comes to vulnerability management, it is limited in what you can do. Our [GraphQL API](https://docs.gitlab.com/ee/api/graphql/index.html) is newer and is the area of focus for new API development. Vulnerability management in particular has quite an extensive feature set in the GraphQL API. Whether you are looking to build task automation, create custom reports, or pull in vulnerability data from external sources, GraphQL is your go to resource.\n\nBringing in vulnerability data from outside GitLab is a new capability worth calling extra attention to. 
You can use GraphQL to [directly create vulnerability records](https://docs.gitlab.com/ee/api/graphql/reference/#mutationvulnerabilitycreate) on projects. This is great for migrating vulnerability data from other systems, creating integrations with a bug bounty program, or even bringing in results from security tools that don’t run in GitLab pipeline jobs. I’m sure our users will come up with many more creative use cases. Even better, these vulnerability records show up in [Vulnerability Reports](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/) and [Security Dashboards](https://docs.gitlab.com/ee/user/application_security/security_dashboard/) just like results from any of our many included security scanners.\n\n## Security approvals help stop new vulnerabilities\n\nA primary goal of any application security program is to reduce risk by keeping vulnerabilities out of deployed code. One of the best ways to do this is by preventing new vulnerabilities from getting into your main branch in the first place. Scanning feature branches on every commit is a recommended practice many of our customers employ. But it’s how to keep vulnerability findings from being merged where I see a lot missing out on a power feature that can help.\n\nI commonly see pipelines configured to block or fail if any security scan jobs detect a potential vulnerability in new code. While this approach is effective in keeping new vulnerabilities from being merged, it can be more disruptive and less efficient for developers and AppSec teams. Instead, we recommend using [security approvals in merge requests](https://docs.gitlab.com/ee/user/application_security/index.html#security-approvals-in-merge-requests). Like normal MR approval rules, you first specify one or more individuals that will be part of the security approval group. 
Members of security approval groups don’t even need to have merge rights to the project so you can have [segregation of duties](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html#merge-request-approval-segregation-of-duties). You then configure the detection rule to set the number of approvals required, severity levels that trigger the approval and even which scanners the rule applies to. And while you are setting up your approval rules, consider enabling the setting that [prevents merge approvals by the MR author](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/settings.html#prevent-approval-by-author) for further segregation of duties.\n\nSecurity approval rules are great for a few reasons. First, you can more quickly enable and configure them on a project than custom pipeline behaviors. Also, only project owners and maintainers are able to access and modify these approvals. Contrast this with pipelines where anyone with the developer role can change pipeline configurations by default. Security approvals are also more visible and collaborative. When a pipeline is blocked or fails, the developer must navigate into the pipeline and try to figure out what failed by reading the job output. When a security approval is triggered, it will clearly show on the MR that merging is blocked until the flagged vulnerabilities are removed or approval is provided from the required number of security approvers. And because you can see any [scanner findings on the MR](https://docs.gitlab.com/ee/user/application_security/index.html#ultimate), developers can not only quickly investigate these potential vulnerabilities, they can also add comments and communicate with the security team. Best of all, developers can simply fix any findings that would require approval. 
Once the security scans no longer detect the violations, merging is immediately possible again.\n\n## Compliance pipelines enforce security hygiene\n\nLast but certainly not least is the newest of these three features: [compliance pipelines](https://docs.gitlab.com/ee/user/project/settings/index.html#compliance-pipeline-configuration). Have you ever wanted to make sure your code branches are properly scanned for vulnerabilities but you were having trouble auditing and enforcing it? Compliance pipelines to the rescue! Compliance pipelines allow group owners to add an additional pipeline configuration to projects. These configurations are combined with any existing configurations for the project pipeline. Compliance pipeline configurations are evaluated before any project configurations meaning they can override any values in the project pipeline. This is a powerful tool for automatically enforcing compliance with various regulatory and private industry standards as well as any internal company policies.\n\nCompliance pipelines work best when combined with [compliance frameworks](https://docs.gitlab.com/ee/user/project/settings/index.html#compliance-frameworks). Compliance frameworks allow group owners to specify the location of a compliance pipeline configuration. The configuration can be stored and managed in a dedicated project with restricted access. Special compliance framework labels are created which can then be applied by the group owner to any projects within the group. This label is what tells a project’s pipeline to pull in the associated compliance pipeline configuration. For example, you might create a PCI compliance label. You then simply apply the label to any projects within the scope of PCI such as any that process or store customer information and payment details.\n\nContinuing with our PCI example, you can enforce code scanning with these two features in place. 
Simply create a compliance pipeline configuration with the desired scanners included such as SAST and Secret Detection. Be sure the configuration file is in a project with access granted only to those users who should have permissions to modify it. Then, edit your PCI compliance label in your group settings and point it to the compliance pipeline configuration. You can even allow compliance job values to be settable at the project level. This means you can, for example, ensure a SAST job runs but leave room to select the right language-specific analyzers for a particular project’s codebase. Even better, [use GraphQL to quickly apply compliance labels](https://docs.gitlab.com/ee/api/graphql/reference/index.html#mutationprojectsetcomplianceframework) to multiple projects.\n\n## Wrapping it up\n\nWith so many features in a single platform, it is easy to overlook some. The ones I’ve shared are only a few of the many security-related features GitLab includes. They are also important to know about because of the additional flexibility and control they offer in addition to our comprehensive security scanning capabilities. 
I hope you’ve found at least one new idea to add to your security toolbelt.\n",[9,875,916],{"slug":8029,"featured":6,"template":686},"three-things-you-might-not-know-about-gitlab-security","content:en-us:blog:three-things-you-might-not-know-about-gitlab-security.yml","Three Things You Might Not Know About Gitlab Security","en-us/blog/three-things-you-might-not-know-about-gitlab-security.yml","en-us/blog/three-things-you-might-not-know-about-gitlab-security",{"_path":8035,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8036,"content":8042,"config":8047,"_id":8049,"_type":14,"title":8050,"_source":16,"_file":8051,"_stem":8052,"_extension":19},"/en-us/blog/three-yaml-tips-better-pipelines",{"title":8037,"description":8038,"ogTitle":8037,"ogDescription":8038,"noIndex":6,"ogImage":8039,"ogUrl":8040,"ogSiteName":670,"ogType":671,"canonicalUrls":8040,"schema":8041},"3 YAML tips for better pipelines","Learn how to get the most out of your YAML configs.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681626/Blog/Hero%20Images/yaml-tips.jpg","https://about.gitlab.com/blog/three-yaml-tips-better-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 YAML tips for better pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-10-01\",\n      }",{"title":8037,"description":8038,"authors":8043,"heroImage":8039,"date":8044,"body":8045,"category":679,"tags":8046},[788],"2020-10-01","\n\nAt GitLab, we’re fans of YAML. But for all of its benefits, we’d be lying if we said YAML hasn’t caused its fair share of headaches, too.\n\n[YAML](https://yaml.org/) is used industry-wide for declarative configuration. YAML offers flexibility and simplicity, as long as you know the rules and limitations. 
Since YAML is platform-agnostic, knowing best practices around YAML configurations is a transferable skillset in a cloud native world.\n\n## What are the benefits of YAML?\n\nYAML is a data serialization language designed to be human-friendly. YAML is easy to use in a text editor, has a simple syntax that works across programming languages, and can store a lot of important configuration data (typically in a .yml or .yaml file).\n\n[YAML is data-oriented](https://blog.stackpath.com/yaml/) and has features derived from Perl, C, HTML, and others.\n\nBecause YAML is a superset of JSON, it has built-in advantages including comments, self-referencing, and support for complex data types.\n\nA [YAML file uses declarative configuration](https://www.codeproject.com/Articles/1214409/Learn-YAML-in-five-minutes) to describe a variety of structures, such as API data structures and even deployment instructions for virtual machines and containers, to name a few.\n\nYAML is comprehensive, widely-used, and works in every type of development environment.\n\n## YAML tip #1: Let other tools do the formatting for you\n\nYAML is one of those languages where it’s minimalism is both a blessing and a curse, depending on who you ask. It also relies on the syntactically significant whitespace that is a source of [heated debate](https://wiki.c2.com/?SyntacticallySignificantWhitespaceConsideredHarmful) among developers. 
For a language where formatting is a king, what can developers do to make sure they stay within the rules without having to analyze every single space and indentation?\n\nMany text editors and platforms have plugins or built-in tools to check YAML configuration syntax for you.\n\n*   [Atom](http://atom.io/), the open source text editor, comes with a default YAML mode.\n*   [Red Hat YAML support](https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml) provides YAML Language and Kubernetes syntax support to the [VS Code editor](https://code.visualstudio.com/).\n*   [OnlineYamlTools](https://onlineyamltools.com/edit-yaml) has a web-based editor that will do in a pinch. It also links to other helpful options such as converting JSON to YAML, etc.\n*   [SlickEdit](https://www.slickedit.com/products/slickedit/448-the-most-powerful-yaml-editor-in-the-world#:~:text=SlickEdit%20%2D%20The%20most%20powerful%20YAML,source%20diff%2C%20and%20much%20more.) is the self-described \"most powerful YAML editor in the world\" and has some helpful features to back it up (at a cost). SlickEdit offers a free trial.\n*   [Pretty YAML](https://packagecontrol.io/packages/Pretty%20YAML) is a plugin for Sublime Text 2 and 3 that allows you to format YAML files.\n\n[Linters](https://sourcelevel.io/blog/what-is-a-linter-and-why-your-team-should-use-it) are used in the development process to analyze code for stylistic and formatting errors, among other things. 
Teams adopt linters and other static tools by integrating them into their integrated development environment (IDE) of choice, and/or by running them as an additional step in their continuous integration (CI).\n\nIn GitLab, we have a [CI lint](https://docs.gitlab.com/ee/ci/lint.html#validate-basic-logic-and-syntax) that checks the syntax of your CI YAML configuration that also runs some basic logical validations.\n\nTo use the CI lint, paste a complete CI configuration (`.gitlab-ci.yml` for example) into the text box and click `Validate`:\n\n![GitLab CI lint](https://docs.gitlab.com/ee/ci/img/ci_lint.png)\n\n## YAML tip #2: Keep it simple\n\nIt’s easy to overwhelm the minimalism of a YAML file by including too many details, or by being inconsistent with formatting. When it comes to YAML, less is often more.\n\nIt isn’t necessary to specify every single attribute. `Job timeout` is an example of an attribute that can be left out, since this is something that is sometimes specified elsewhere. An example in GitLab is [interruptible](https://docs.gitlab.com/ee/ci/yaml/#interruptible), which is used to indicate that a job should be canceled if made redundant by a newer pipeline run. Since this defaults to `false` it’s not always necessary to include it.\n\nSome people indent gratuitously when writing YAML to help themselves visualize large chunks of data. To better visualize how data works together, it might be helpful to create a \"pseudo-config\" before committing the code to YAML. On the [Red Hat blog](https://www.redhat.com/sysadmin/yaml-tips), a pseudo-config is described as pseudo-code where you don't have to worry about structure or indentation, parent-child relationships, inheritance, or nesting. 
Just write the data down as you understand it.\n\n![Red Hat pseudo config](https://www.redhat.com/sysadmin/sites/default/files/inline-images/pseudoyaml.jpg)\n\nOnce you understand how the data correlates, then you can commit it to YAML.\n\nRegardless of how you define simplicity in your workflow, try to keep YAML configs uncluttered and include only the necessary data. And if you’re not sure what data is necessary, write out a pseudo-config to help you visualize it.\n\n\n\n## YAML tip #3: Reuse config when possible\n\nStarting from scratch is a lot of wasted effort, and YAML is no exception. One of the best parts of YAML is its reusabilty, and reusing config is a way to keep files consistent within an organization.\n\nOne way to [avoid duplicated configuration](https://docs.gitlab.com/ee/ci/yaml/#include) is by using the `include` keyword, which allows the inclusion of external YAML files. For example, global default variables for all projects that don’t need to be modified for every file. The `include` keyword helps to break down a YAML configuration into multiple files and boosts readability, especially for long files. It’s also possible to have template files stored in a central repository and projects included in their configuration files.\n\n`extends` is a great way to reuse some YAML config in multiple places, for example:\n\n```\n.image_template:\n  image:\n    name: centos:latest\n\ntest:\n  extends: .image_template\n  script:\n    - echo \"Testing\"\n\ndeploy:\n  extends: .image_template\n  script:\n    - echo \"Deploying\"\n```\n\nYAML has a handy feature called [anchors](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#anchors), which lets you easily duplicate content across your document. Anchors can be used to duplicate/inherit properties, and is a perfect example to be used with [hidden jobs](https://docs.gitlab.com/ee/ci/jobs/#hide-jobs) to provide templates for your jobs. 
When there is duplicate keys, GitLab will perform a reverse deep merge based on the keys.\n\n```\n.job_template: &job_definition  # Hidden key that defines an anchor named 'job_definition'\n  image: ruby:2.6\n  services:\n    - postgres\n    - redis\n\ntest1:\n  &lt;\u003C: *job_definition           # Merge the contents of the 'job_definition' alias\n  script:\n    - test1 project\n\ntest2:\n  &lt;\u003C: *job_definition           # Merge the contents of the 'job_definition' alias\n  script:\n    - test2 project\n```\n\nOne big caveat to anchors: You can’t use anchors across multiple files when leveraging the `include` feature.\n\nInstead of building pipelines from scratch, [CI/CD pipeline templates](/blog/get-started-ci-pipeline-templates/) simplify the process by having parameters already built-in. At GitLab, pipelines are defined in a `gitlab-ci.yml` file. Because our CI/CD templates come in over 30 popular languages, chances are good that we have the template you need to get started in our [CI template repository](https://gitlab.com/gitlab-org/gitlab/tree/master/lib/gitlab/ci/templates).\n\nTemplates can be modified and are created to fit many use cases. To see a large `.gitlab-ci.yml` file used in an enterprise, see the [.gitlab-ci.yml file for GitLab](https://gitlab.com/gitlab-org/gitlab/blob/master/.gitlab-ci.yml).\n\nWhether you’re a YAML lover, YAML novice, or using YAML against your will, knowing some tips and tricks can make your development process a better experience. Do you have any YAML tips or recommendations? Feel free to comment down below.\n\nCurious about GitLab CI/CD and want to show off your YAML skills? 
[Try GitLab free for 30 days](/free-trial/).\n\nCover image by [Harits Mustya Pratama](https://unsplash.com/@haritsmustya?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/greenhouse?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n## Related content\n[GitLab CI/CD pipeline configuration reference](https://docs.gitlab.com/ee/ci/yaml)\n\n[Unlock better DevOps with GitLab CI/CD](https://about.gitlab.com/blog/better-devops-with-gitlab-ci-cd/)\n\n[Pipeline efficiency](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html)\n",[109,9],{"slug":8048,"featured":6,"template":686},"three-yaml-tips-better-pipelines","content:en-us:blog:three-yaml-tips-better-pipelines.yml","Three Yaml Tips Better Pipelines","en-us/blog/three-yaml-tips-better-pipelines.yml","en-us/blog/three-yaml-tips-better-pipelines",{"_path":8054,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8055,"content":8061,"config":8066,"_id":8068,"_type":14,"title":8069,"_source":16,"_file":8070,"_stem":8071,"_extension":19},"/en-us/blog/tips-for-managing-monorepos-in-gitlab",{"title":8056,"description":8057,"ogTitle":8056,"ogDescription":8057,"noIndex":6,"ogImage":8058,"ogUrl":8059,"ogSiteName":670,"ogType":671,"canonicalUrls":8059,"schema":8060},"5 Tips for managing monorepos in GitLab","Learn the benefits of operating a monolothic repository and how to get the most out of this structure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667591/Blog/Hero%20Images/code-review-blog.jpg","https://about.gitlab.com/blog/tips-for-managing-monorepos-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Tips for managing monorepos in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Waldner\"}],\n        \"datePublished\": \"2022-07-12\",\n      
}",{"title":8056,"description":8057,"authors":8062,"heroImage":8058,"date":8063,"body":8064,"category":791,"tags":8065},[5560],"2022-07-12","\nGitLab was founded 10 years ago on Git because it is the market leading version control system. As [Marc Andressen pointed out in 2011](https://www.wsj.com/articles/SB10001424053111903480904576512250915629460), we see teams and code bases expanding at incredible rates, testing the limits of Git. Organizations are experiencing significant slowdowns in performance and added administration complexity working on enormous repositories or monolithic repositories. \n\n## Why do organizations develop on monorepos? \n\nGreat question. While [some](https://www.infoworld.com/article/3638860/the-case-against-monorepos.html) might believe that monorepos are a no-no, there are valid reasons why companies, including  Google or GitLab (that’s right! We operate a monolithic repository), choose to do so. The main benefits are: \n\n- Monorepos can reduce silos between teams, streamlining collaboration on design, development, and operation of different services because everything is within the same repository.\n- Monorepos help organizations standardize on tooling and processes. If a company is pursuing a DevOps transformation, a monorepo can help accelerate change management when it comes to new workflows or the rollout of new tools.\n- Monorepos simplify dependency management because all packages can be updated in a single commit.\n- Monorepos offer unified CI/CD and build processes. Having all services in a single repository means that you can set up one system of pipelines for everyone.\n\nWhile we still have a ways to go before monorepos or monolithic repositories are as easy to manage as multi-repos in GitLab, we put together five tips and tricks to maintain velocity while developing on a monorepo in GitLab.\n\n**1. 
Use CODEOWNERS to streamline merge request approvals**\n\nCODEOWNERS files live in the repository and assign an owner to a portion of the code, making it super efficient to process changes. Investing time in setting up a robust [CODEOWNERS file](https://docs.gitlab.com/ee/user/project/codeowners/) that you can then use to automate [merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/) from required people will save time down the road for developers. \n\nYou can then set your merge requests so they must be approved by Code Owners before merge. CODEOWNERS specified for the changed files in the merge request will be automatically notified.\n\n**2. Improve git operation performance with Git LFS**\n\nA universal truth of git is that managing large files is challenging. If you work in the gaming industry, I am sure you’ve been through the annoying process of trying to remove a binary file from the repository history after a well-meaning coworker committed it. This is where [Git LFS](https://docs.gitlab.com/ee/topics/git/lfs/#git-large-file-storage-lfs) comes in. Git LFS keeps all the big files in a different location so that they do not exponentially increase the size of a repository.\n\nThe GitLab server communicates with the Git LFS client over HTTPS. You can enable Git LFS for a project by toggling it in [project settings](https://docs.gitlab.com/ee/user/project/settings/index.html#configure-project-visibility-features-and-permissions). All files in Git LFS can be tracked in the GitLab interface. GitLab indicates what files are stored there with the LFS icon.\n\n**3. Reduce download time with partial clone operations**\n\n[Partial clone](https://docs.gitlab.com/ee/topics/git/partial_clone.html#partial-clone) is a performance optimization that allows Git to function without having a complete copy of the repository. 
The goal of this work is to allow Git to better handle extremely large repositories.\n\nAs we just talked about, storing large binary files in Git is normally discouraged, because every large file added is downloaded by everyone who clones or fetches changes thereafter. These downloads are slow and problematic, especially when working from a slow or unreliable internet connection.\n\nUsing partial clone with a file size filter solves this problem, by excluding troublesome large files from clones and fetches. \n\n**4. Take advantage of parent-child pipelines**\n\n[Parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) are where one pipeline triggers a set of downstream pipelines in the same project. The downstream pipelines still execute in the same stages or sequence without waiting for other pipelines to finish. Additionally, child pipelines reduce the configuration to the child pipeline, making it easier to interpret and understand. For monorepos, using parent-child pipelines in conjunction with `rules:changes` will only run pipelines on specified files changes. This reduces wasted time running pipelines across the entire repository.  \n\n**5. Use incremental backups to eliminate downtime** \n\n[Incremental backups](https://docs.gitlab.com/ee/raketasks/backup_restore.html#incremental-repository-backups) can be faster than full backups because they only pack changes since the last backup into the backup bundle for each repository. This is super useful when you are working on a large repository and only developing on certain parts of the code base at a time.\n\n## Where we are headed\n\nWhile these tips have helped many customers migrate from other version control systems to GitLab, we know there is still room for improvement. Over the next year, you will see us working on the following projects. 
We’d LOVE to hear from you, so share your thoughts, ideas, or simply 👍 on an issue to help prioritize things that will make your life easier.\n\n- [Git for enormous repositories](https://gitlab.com/groups/gitlab-org/-/epics/773)\n- [Expand SAST scanner support for monorepos](https://gitlab.com/groups/gitlab-org/-/epics/4895)\n- [Allow Reports to be Namespace to support monorepos](https://gitlab.com/gitlab-org/gitlab/-/issues/299490)\n",[9,916,1040,1180,683],{"slug":8067,"featured":6,"template":686},"tips-for-managing-monorepos-in-gitlab","content:en-us:blog:tips-for-managing-monorepos-in-gitlab.yml","Tips For Managing Monorepos In Gitlab","en-us/blog/tips-for-managing-monorepos-in-gitlab.yml","en-us/blog/tips-for-managing-monorepos-in-gitlab",{"_path":8073,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8074,"content":8080,"config":8084,"_id":8086,"_type":14,"title":8087,"_source":16,"_file":8088,"_stem":8089,"_extension":19},"/en-us/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer",{"title":8075,"description":8076,"ogTitle":8075,"ogDescription":8076,"noIndex":6,"ogImage":8077,"ogUrl":8078,"ogSiteName":670,"ogType":671,"canonicalUrls":8078,"schema":8079},"Ditch toolchain problems with a DevOps platform","Migrating to a platform is the next step in the DevOps evolution.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667978/Blog/Hero%20Images/go-tools-and-gitlab.jpg","https://about.gitlab.com/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ditch toolchain problems with a DevOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-08-24\",\n      }",{"title":8075,"description":8076,"authors":8081,"heroImage":8077,"date":6861,"body":8082,"category":769,"tags":8083},[810],"\n\nBy adopting DevOps tools without an 
end-to-end platform, teams have been adding complexity, mounting costs, and headaches to their job. [Migrating to a true DevOps platform](https://page.gitlab.com/migrate-to-devops-guide.html) is the way to get out from under all of that and gain control of projects, break down silos, and cultivate collaboration.\n\nCompanies are increasingly turning to DevOps to create software more efficiently and securely. However, not all of them have adopted a [single DevOps platform](/blog/welcome-to-the-devops-platform-era/), instead opting to cobble together a myriad of tools to handle everything in the software development lifecycle – from planning to delivery. Of course, DevOps tools are helpful, but there can be too much of a good thing.\n\nThis do-it-yourself, or DIY, effort creates a mish-mash of tools that force team members to continuously jump back and forth between multiple interfaces, passwords, and ways of working. It also creates a chaotic environment that needs to be endlessly updated and held together with digital duct tape. And by using a plethora of disparate tools, no one gets an overall view of the projects they’re working on.\n\nGoing DIY isn’t just affecting software development and deployment. It’s also weighing down the business that relies on those products.\n\nThe [problem solver here is the end-to-end platform](/blog/the-devops-platform-for-agile-business/). It’s the next step in DevOps, changing the way people work in a fundamental way.\n\nMigrating from a seat-of-your-pants, DIY system to a simpler, more powerful, single application brings a lot of benefits. Using an end-to-end platform eliminates the time-consuming and costly tangle of tools, breaks down silos, [builds security into every step](/blog/one-devops-platform-can-help-you-achieve-devsecops/) of the development process, and speeds strategic visions into actual working software. 
The platform enables tech teams to increase efficiency by focusing on delivering software, instead of updating, patching, and stitching together toolchains. \n\n## Eliminating the DevOps tax\n\nMigrating from a complex toolchain to a platform also will eliminate the DevOps tax. \n\nThat refers to the cost that organizations incur when they employ multiple tools and/or multiple toolchains instead of a single, continuous platform. Think about how much time workers spend stitching together and maintaining a toolchain rather than focusing on planning, developing, and deploying software.\n\nHow much are organizations wasting on the dreaded DevOps tax? Too much: our [2022 Global DevSecOps Survey](/developer-survey/) found nearly 40% of devs are spending between one-quarter and one-half of their time integrating and maintaining toolchains, while another 33% spend half to **all** of their time dealing with this issue. Thus it's no surprise that 69% of respondents want to consolidate their toolchains.\n\nA return on investment, or ROI, should come quickly for companies migrating to a DevOps platform, since they will be saving the money that would have been spent watering and feeding a large, complicated tangle of tools. \n\n##  Fostering collaboration\n\nAnother value add to using a DevOps platform is that it will [foster collaboration](/blog/5-ways-collaboration-boosts-productivity-and-your-career/) and shared responsibility. Team members will no longer be working in isolated silos, focused only on their own project – or even just a piece of a project. A DevOps platform enables communication and information sharing. It also adds transparency by giving everyone with a stake in the project a clear view of the progress being made and any challenges being encountered. It also allows for people to make suggestions to share ideas or help clear away obstacles. 
\n\nA [DevOps platform](/solutions/devops-platform/) will streamline every aspect of the software development lifecycle — from planning to development, testing, deployment, and monitoring. Check out the [Migrating to a DevOps platform playbook](https://page.gitlab.com/migrate-to-devops-guide.html) for more information on replacing your DIY DevOps toolchain with an end-to-end platform.\n",[9,681,875],{"slug":8085,"featured":6,"template":686},"too-many-toolchains-a-devops-platform-migration-is-the-answer","content:en-us:blog:too-many-toolchains-a-devops-platform-migration-is-the-answer.yml","Too Many Toolchains A Devops Platform Migration Is The Answer","en-us/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer.yml","en-us/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer",{"_path":8091,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8092,"content":8098,"config":8102,"_id":8104,"_type":14,"title":8105,"_source":16,"_file":8106,"_stem":8107,"_extension":19},"/en-us/blog/toolchain-security-with-gitlab",{"title":8093,"description":8094,"ogTitle":8093,"ogDescription":8094,"noIndex":6,"ogImage":8095,"ogUrl":8096,"ogSiteName":670,"ogType":671,"canonicalUrls":8096,"schema":8097},"How to overcome toolchain security challenges with GitLab","Use GitLab to control your toolchain sprawl, improve team communication and productivity, and secure your DevOps lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673158/Blog/Hero%20Images/toolchain-security-gitlab-cover.jpg","https://about.gitlab.com/blog/toolchain-security-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to overcome toolchain security challenges with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-11-20\",\n      
}",{"title":8093,"description":8094,"authors":8099,"heroImage":8095,"date":3093,"body":8100,"category":875,"tags":8101},[1016],"\nIntegrated toolchains [are on the rise](https://go.forrester.com/blogs/the-rise-fall-and-rise-again-of-the-integrated-developer-tool-chain/), according to Forrester analyst Christopher Condo. Integrated toolchains actually faded out for a while\nbecause developers wanted to avoid vendor lock in - and because sometimes solutions didn’t [play well with others](/handbook/product/gitlab-the-product/#plays-well-with-others).\nBut today, the growing popularity of CI/CD and open source means more free tools in the software delivery market and dev teams are happily adding them to their arsenal.\n\nUnfortunately, too much of a good thing can be a bad thing. Integrating,\nmanaging, and protecting the DevOps lifecycle has become a burden on many teams.\nIn a recent [Forrester report](/resources/whitepaper-forrester-manage-your-toolchain/),\nover three quarters of survey respondents said their teams use more than two\ntoolchains to support software delivery, and a majority reported that each\ntoolchain is made up of six or more tools.\n\nDevOps fosters innovation but an overly complex toolchain stifles it.\nToolchain maintenance and management shouldn’t consume resources that could\notherwise be invested in product development and innovation, but that’s the reality\non the ground for too many teams.\n\n## Complex toolchains compromise security\n\nManaging these toolchains has become a monumental task, with some businesses\ndevoting 10% of their dev team to toolchain maintenance, according to the Forrester report.\nBesides inhibiting productivity, toolchain complexity also poses a risk to\nyour security posture.\n\nMost teams are tasked with integrating their toolchains by manual means, such\nas plugins and scripts or hard-coded custom integrations. 
Not only is this\nlabor-intensive, it also adds the significant risk of human error.\nAdditionally, more tools mean more authentication and security requirements to\nmanage, less visibility into the software\nlifecycle, and no view into the process of maintaining the toolchain\nitself - all of which adds unnecessary risk for your IT and dev teams to deal\nwith.\n\nMeanwhile, the consequences of poor security practices are mounting. [According to IBM](https://databreachcalculator.mybluemix.net),\nit takes businesses an average of 279 days to identify and contain a breach,\nat an average cost of $3.9 million.\n\n## DevSecOps with GitLab: your knight in shining armor\n\nLuckily, we’re here to save the day. [GitLab is a single out-of-the-box solution\nfor your **entire** software delivery lifecycle](/stages-devops-lifecycle/) -\nsolving your authentication and requirement woes right off the bat. We’ve built\na number of security and risk prevention measures into many of the DevOps lifecycle\nphases: code reviews, static and dynamic [application security\ntesting](/topics/devsecops/), dependency and container scanning, license compliance, and incident\nmanagement. We also have an exciting array of new features on the horizon, which\ncan be found in the table below.\n\n![GitLab is a complete DevOps platform, delivered as a single application.](https://about.gitlab.com/images/blogimages/toolchain-security-gitlab-inline.png){: .shadow}\n\nDevSecOps is a product of the shift-left movement, integrating security into\nthe earliest possible phases of DevOps. Bringing security in at the beginning\nhelps teams understand where certain testing processes and controls need to\nfall, and helps save time, energy, and resources as you move through the final\nphases of DevOps.\n\nGitLab’s single application eases communication between teams, increases\nvisibility, and streamlines your DevOps lifecycle as a whole. 
We’re here to\nhelp your teams achieve faster delivery cycles without compromising quality,\nand bring your security practices to the speed of the business.\n\nCover image by [Jukan Tateisi](https://unsplash.com/@tateisimikito) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,749,875],{"slug":8103,"featured":6,"template":686},"toolchain-security-with-gitlab","content:en-us:blog:toolchain-security-with-gitlab.yml","Toolchain Security With Gitlab","en-us/blog/toolchain-security-with-gitlab.yml","en-us/blog/toolchain-security-with-gitlab",{"_path":8109,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8110,"content":8115,"config":8120,"_id":8122,"_type":14,"title":8123,"_source":16,"_file":8124,"_stem":8125,"_extension":19},"/en-us/blog/top-10-gitlab-hacks",{"title":8111,"description":8112,"ogTitle":8111,"ogDescription":8112,"noIndex":6,"ogImage":2877,"ogUrl":8113,"ogSiteName":670,"ogType":671,"canonicalUrls":8113,"schema":8114},"Top ten GitLab hacks for all stages of the DevOps Platform","Get the most out of the GitLab DevOps Platform with our ten best tips for enhanced productivity.","https://about.gitlab.com/blog/top-10-gitlab-hacks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top ten GitLab hacks for all stages of the DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2021-10-19\",\n      }",{"title":8111,"description":8112,"authors":8116,"heroImage":2877,"date":8117,"body":8118,"category":769,"tags":8119},[2473],"2021-10-19","\nIt's been ten years since the first commit to GitLab, so we are sharing our ten favorite GitLab hacks to help you get the most out of our DevOps Platform. 
These are tips for all stages of the development lifecycle, so roll up your sleeves and let's get started.\n\n## Manage faster with quick actions\n\nYou might have adopted keyboard shortcuts for faster navigation and workflows already - if not, check out the GitLab documentation for [platform specific shortcuts](https://docs.gitlab.com/ee/user/shortcuts.html). The knowledge of pressing `r` to land in the reply to comment in text form can be combined with other quick actions, including:\n\n```\n/assign_reviewer @ \u003Csearch username>\n\n/label ~ \u003Csearch label>\n/label ~enhancement ~workflow::indev\n\n/due Oct 8\n\n/rebase\n\n/approve\n\n/merge \n```\n\nQuick actions are also helpful if you have to manage many issues, merge requests and epics at the same time. There are specific actions which allow you to duplicate existing issues, as one example. \n\nTake a deeper dive into [Quick Actions](/blog/improve-your-gitlab-productivity-with-these-10-tips/). \n\n## Plan instructions with templates\n\nDon’t fall into the trap of back-and-forth with empty issue descriptions that leave out details your development teams need to reproduce the error in the best way possible. \n\nGitLab provides the possibility to use so-called [description templates](https://docs.gitlab.com/ee/user/project/description_templates.html) in issues and merge requests. Next to providing a structured template with headings, you can also add [task lists](https://docs.gitlab.com/ee/user/markdown.html#task-lists) which can later be ticked off by the assignee. Basically everything is possible and is supported in GitLab-flavored markdown and HTML.\n\nIn addition to that, you can combine the static description templates with quick actions. This allows you to automatically set labels, assignees, define due dates, and more to level up your productivity with GitLab. \n\n```\n\u003C!-- \nThis is a comment, it will not be rendered by the Markdown engine. 
You can use it to provide instructions how to fill in the template.\n--> \n\n### Summary \n\n\u003C!-- Summarize the bug encountered concisely. -->\n\n### Steps to reproduce\n\n\u003C!-- Describe how one can reproduce the issue - this is very important. -->\n\n### Output of checks\n\n\u003C!-- If you are reporting a bug on GitLab.com, write: This bug happens on GitLab.com -->\n\n#### Results of GitLab environment info\n\n\u003C!--  Input any relevant GitLab environment information if needed. -->\n\n\u003Cdetails>\n\u003Csummary>Expand for output related to app info\u003C/summary>\n\n\u003Cpre>\n\n(Paste the version details of your app here)\n\n\u003C/pre>\n\u003C/details>\n\n### Possible fixes\n\n\u003C!-- If you can, link to the line of code and suggest actions. -->\n\n## Maintainer tasks\n\n- [ ] Problem reproduced\n- [ ] Weight added\n- [ ] Fix in test\n- [ ] Docs update needed\n\n/label ~\"type::bug\"\n```\n\nWhen you manage different types of templates, you can pass along the name of the template in the `issuable_template` parameter, for example `https://gitlab.com/gitlab-org/gitlab/-/issues/new?issuable_template=Feature%20proposal%20%23%20lean`. \n\nAt GitLab, we use description and merge request templates in many ways: [GitLab the project](https://gitlab.com/gitlab-org/gitlab/-/tree/master/.gitlab/issue_templates), [GitLab Corporate Marketing team](https://gitlab.com/gitlab-com/marketing/corporate_marketing/corporate-marketing/-/tree/master/.gitlab/issue_templates), [GitLab team member onboarding](https://gitlab.com/gitlab-com/people-group/people-operations/employment-templates/-/tree/master/.gitlab/issue_templates) and [GitLab product team](https://gitlab.com/gitlab-com/Product/-/tree/main/.gitlab/issue_templates) are just a few examples.\n\n## Create with confidence \n\nWhen reading GitLab issues and merge requests, you may see the abbreviation `MWPS` which means `Merge When Pipeline Succeeds`. 
This is an efficient way to merge the MRs when the pipeline passes all jobs and stages - you can even combine this workflow with [automatically closing issues](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically) with keywords from the MR.\n\n`Merge When Pipeline Succeeds` also works on the CLI with the `git` command and [push options](https://docs.gitlab.com/ee/user/project/push_options.html). That way you can create a merge request from a local Git branch, and set it to merge when the pipeline succeeds.\n\n```shell\n# mwps BRANCHNAME\nalias mwps='git push -u origin -o merge_request.create -o merge_request.target=main -o merge_request.merge_when_pipeline_succeeds'\n```\n\nCheckout [this ZSH alias example](https://gitlab.com/sytses/dotfiles/-/blob/745ef9725a859dd759059f6ce283e2a8132c9b00/git/aliases.zsh#L24) in our CEO [Sid Sijbrandij](/company/team/#sytses)’s dotfiles repository. There are more push options available, and even more Git CLI tips in [our tools & tips handbook](https://handbook.gitlab.com/handbook/tools-and-tips/#terminal). One last tip: Delete all local branches where the remote branch was deleted, for example after merging a MR.\n\n```shell\n# Delete all remote tracking Git branches where the upstream branch has been deleted\nalias git_prune=\"git fetch --prune && git branch -vv | grep 'origin/.*: gone]' | awk '{print \\$1}' | xargs git branch -d\"\n```\n\nYou are not bound to your local CLI environment; take it to the cloud with [Gitpod](/blog/teams-gitpod-integration-gitlab-speed-up-development/) and either work in VS Code or the pod terminal. \n\n## Verify your CI/CD pipeline\n\nRemember the old workflow of committing a change to `.gitlab-ci.yml` just to see if it was valid, or if the job template really inherits all the attributes? This has gotten a whole lot easier with our new [pipeline editor](https://docs.gitlab.com/ee/ci/pipeline_editor/). 
Navigate into the `CI/CD` menu and start building CI/CD pipelines right away.\n\nBut the editor is more than just another YAML editor. You’ll get live linting, allowing you to know if there is a missing dash for array lists or a wrong keyword in use before you commit. You can also preview jobs and stages or asynchronous dependencies with `needs` to make your pipelines more efficient.\n\nThe pipeline editor also uses the `/ci/lint` API endpoint, and fetches the merged YAML configuration I described earlier in [this blog post about jq and CI/CD linting](/blog/devops-workflows-json-format-jq-ci-cd-lint/). That way you can quickly verify that job templates with [extends](https://docs.gitlab.com/ee/ci/yaml/#extends) and [!reference tags](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#reference-tags) work in the way you designed them. It also allows you to unfold included files, and possible job overrides (for example changing the stage of an [included SAST security template](https://docs.gitlab.com/ee/user/application_security/sast/#overriding-sast-jobs)).\n\nLet’s try a quick example – create a new project and new file called `server.c` with the following content: \n\n```\n#include \u003Cstdio.h>\n#include \u003Cstring.h>\n#include \u003Csys/mman.h>\n#include \u003Csys/stat.h>\n#include \u003Cunistd.h>\n\nint main(void) {\n    size_t pagesize = getpagesize();\n    char * region = mmap(\n        (void*) (pagesize * (1 \u003C\u003C 20)),\n        pagesize,\n        PROT_READ|PROT_WRITE|PROT_EXEC,\n        MAP_ANON|MAP_PRIVATE, 0, 0);\n\n    strcpy(region, \"Hello GitLab SAST!\");\n    printf(\"Contents of region: %s\\n\", region);\n\n    FILE *fp;\n    fp = fopen(\"devops.platform\", \"r\");\n    fprintf(fp, \"10 years of GitLab 🦊 🥳\");\n    fclose(fp);\n    chmod(\"devops.platform\", S_IRWXU|S_IRWXG|S_IRWXO);\n\n    return 0;\n}\n```\n\nOpen the CI/CD pipeline editor and add the following configuration, with an extra `secure` stage assigned to the 
`semgrep-sast` job for SAST and the C code. \n\n```yaml\nstages:\n    - build\n    - secure\n    - test\n    - deploy\n\ninclude:\n    - template: Security/SAST.gitlab-ci.yml\n\nsemgrep-sast:\n    stage: secure\n```\n\nInspect the `Merged YAML tab` to see the fully compiled CI/CD configuration. You can commit the changes and check the found vulnerabilities too as an async practice :). The examples are available in [this project](https://gitlab.com/gitlab-de/playground/sast-10y-example).\n\n![CI/CD Pipeline editor - Merged YAML](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_pipeline_editor_view_merged_yaml.png)\nVerify the stage attribute for the job by opening the `view merged YAML` tab in the CI/CD pipeline editor.\n{: .note.text-center}\n\n## Package your applications\n\nThe [package registry](https://docs.gitlab.com/ee/user/packages/) possibilities are huge and there are more languages and package managers to come. Describing why Terraform, Helm, and containers (for infrastructure) and Maven, npm, NuGet, PyPI, Composer, Conan, Debian, Go and Ruby Gems (for applications) are so awesome would take too long, but it's clear there are plenty of choices. \n\nOne of my favourite workflows is to use existing CI/CD templates to publish container images in the GitLab container registry. This makes continuous delivery much more efficient, such as when deploying the application into your Kubernetes cluster or AWS instances. 
\n\n```yaml\ninclude:\n  - template: 'Docker.gitlab-ci.yml'\n```\n\nIn addition to including the CI/CD template, you can also override the job attributes and define a specific stage and manual non-blocking rules.\n\n```yaml\nstages:\n  - build\n  - docker-build\n  - test\n\ninclude:\n  - template: 'Docker.gitlab-ci.yml'\n\n# Change Docker build to manual non-blocking\ndocker-build:\n  stage: docker-build\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'\n      when: manual \n      allow_failure: true\n```\n\nFor celebrating #10YearsOfGitLab, we have created a [C++ example](https://gitlab.com/gitlab-de/cicd-tanuki-cpp) with an Easter egg on time calculations. This project also uses a Docker builder image to showcase a more efficient pipeline. Our recommendation is to learn using the templates in a test repository, and then create a dedicated group/project for managing all required container images. You can think of builder images which include the compiler tool chain, or specific scripts to run end-to-end tests, etc. \n\n## Secure your secrets\n\nIt is easy to leak a secret by making choices that uncomplicate a unit test by running it directly with the production database. The secret persists in git history, and someone with bad intentions gains access to private data, or finds ways to exploit your supply chain even further. \n\nTo help prevent that, include the CI/CD template for secret detection. \n\n```yaml\nstages:\n    - test\n\ninclude:\n  - template: Security/Secret-Detection.gitlab-ci.yml  \n```\n\nA known way to leak secrets is committing the `.env` file which stores settings and secrets in the repository. Try the following snippet by adding a new file `.env` and create a merge request.\n\n```\nexport AWS_KEY=\"AKIA1318109798ABCDEF\"\n```\n\nInspect the reports JSON to see the raw reports structure. GitLab Ultimate provides an MR integration, a security dashboard overview, and more features to take immediate action. 
The example can be found in [this project](https://gitlab.com/gitlab-de/playground/secret-scanning-10y-example).\n\n![Secrets Scanning in MR](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_secrets_scanning.png)\nMR detail view with detected AWS secret from security scanning\n{: .note.text-center}\n\n## Release and continuously deliver (CD)\n\nGitLab’s release stage provides many [features](https://about.gitlab.com/handbook/product/categories/features/#release), including [canary deployments](https://docs.gitlab.com/ee/user/project/canary_deployments.html) and [GitLab pages](https://docs.gitlab.com/ee/user/project/pages/). There are also infrastructure deployments with Terraform and cloud native (protected) [environments](https://docs.gitlab.com/ee/ci/environments/). \n\nWhile working on a CI/CD pipeline efficiency workshop, I got enthusiastic about [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#parent-child-pipelines) allowing non-blocking child pipelines into production, with micro services in Kubernetes as one example. \n\nLet’s try it! Create a new project, and add 2 child pipeline configuration files: `child-deploy-staging.yml` and `child-deploy-prod.yml`. The naming is important as the files will be referenced in the main `.gitlab-ci.yml` configuration file later. The jobs in the child pipelines will sleep for 60 seconds to simulate a deployment. 
\n\nchild-deploy-staging.yml:\n\n```yaml\ndeploy-staging:\n    stage: deploy\n    script:\n        - echo \"Deploying microservices to staging\" && sleep 60\n```\n\nchild-deploy-prod.yml:\n\n```yaml\ndeploy-prod:\n    stage: deploy\n    script:\n        - echo \"Deploying microservices to prod\" && sleep 60\n\nmonitor-prod:\n    stage: deploy\n    script:\n        - echo \"Monitoring production SLOs\" && sleep 60\n```\n\nNow edit the `.gitlab-ci.yml` configuration file and create a build-test-deploy stage workflow.\n\n```yaml\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script: echo \"Build\"\n\ntest:\n  stage: test \n  script: echo \"Test\"\n\ndeploy-staging-trigger:\n  stage: deploy\n  trigger:\n    include: child-deploy-staging.yml\n  #rules:\n  #  - if: $CI_MERGE_REQUEST_ID\n\ndeploy-prod-trigger:\n  stage: deploy\n  trigger:\n    include: child-deploy-prod.yml\n    #strategy: depend\n  #rules:\n  #  - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH   \n```\n\nCommit the changes and inspect the CI/CD pipelines. \n\n![Parent-child Pipelines](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_parent_child_pipelines.png)\nView parent-child pipelines in GitLab\n{: .note.text-center}\n\n`strategy: depend` allows you to make the child pipelines blocking again, and the parent child pipeline waits again. Try uncommenting this for the prod job, and verify that by inspecting the pipeline view. [Rules](https://docs.gitlab.com/ee/ci/yaml/#rules) allow refining the scope when jobs are being run, such as when staging child pipelines that should only be run in merge requests and the prod child pipeline only gets triggered when on the default main branch. The full example can be found in [this project](https://gitlab.com/gitlab-de/playground/parent-child-pipeline-10y-example).\n\nTip: You can use [resource_groups](/blog/introducing-resource-groups/) to limit production deployments from running concurrent child pipelines. 
\n\n## Configure your infrastructure\n\nTerraform allows you to describe, plan and apply the provisioning of infrastructure resources. The workflow requires a state file to be stored over steps, where the [managed state in GitLab](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html) as an HTTP backend is a great help, together with predefined container images and CI/CD templates to make [Infrastructure as code](https://docs.gitlab.com/ee/user/infrastructure/iac/) as smooth as possible.\n\nYou can customize the template, or copy the CI/CD configuration into .gitlab-ci.yml and modify the steps by yourself. Let’s try a quick example with only an AWS account and an IAM user key pair. Configure them as CI/CD variables in `Settings > CI/CD > Variables`: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.\n\nNext, create the `backend.tf` file and specify the http backend and AWS module dependency.\n\n```terraform\nterraform {\n  backend \"http\" {\n  }\n\n  required_providers {\n    aws = {\n      source = \"hashicorp/aws\"\n      version = \"~> 3.0\"\n    }\n  }\n}\n```\n\nCreate `provider.tf` to specify the AWS region.\n\n```terraform\nprovider \"aws\" {\n  region = \"us-east-1\"\n}\n```\n\nThe `main.tf` describes the S3 bucket resources.\n\n```terraform\nresource \"aws_s3_bucket_public_access_block\" \"publicaccess\" {\n  bucket = aws_s3_bucket.demobucket.id\n  block_public_acls = false\n  block_public_policy = false\n}\n\nresource \"aws_s3_bucket\" \"demobucket\" {\n  bucket = \"terraformdemobucket\"\n  acl = \"private\"\n}\n```\n\nTip: You can verify the configuration locally on your CLI by commenting out the HTTP backend above.\n\nFor GitLab CI/CD, open the pipeline editor and use the following configuration: (Note that it is important to specify the `TF_ROOT` and `TF_ADDRESS` variables since you can [manage multiple Terraform state files](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html#configure-the-backend)). 
\n\n```yaml\nvariables:\n  TF_ROOT: ${CI_PROJECT_DIR}\n  TF_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${CI_PROJECT_NAME}\n\ninclude:\n    - template: Terraform.latest.gitlab-ci.yml\n\nstages:\n  - init\n  - validate\n  - build\n  - deploy\n  - cleanup\n\ndestroy:\n    stage: cleanup\n    extends: .terraform:destroy \n    when: manual\n    allow_failure: true\n```\n\nCommit the configuration and inspect the pipeline jobs. \n\n![Terraform pipeline AWS S3 bucket](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_terraform_state_cicd_pipeline_aws_s3_bucket.png)\nAWS S3 bucket provisioned with Terraform in GitLab CI/CD \n{: .note.text-center}\n\nThe `destroy` job is not created in the template and therefore explicitly added as a manual job. It is recommended to review the opinionated Terraform CI/CD template and copy the jobs into your own configuration to allow for further modifications or style adjustments.  The full example is located in [this project](https://gitlab.com/gitlab-de/playground/terraform-aws-state-10y-example).\n\n![GitLab managed Terraform states](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_terraform_state_cicd_overview.png)\nView the Terraform states in GitLab\n{: .note.text-center}\n\nHat tipping to our Package stage - you can manage and publish [Terraform modules in the registry](https://docs.gitlab.com/ee/user/packages/terraform_module_registry/) too, using all of the DevOps Platform advantages. And hot off the press, the [GitLab Kubernetes Operator is generally available](/blog/open-shift-ga/). \n\n## Monitor GitLab and dive into Prometheus\n\nPrometheus is a monitoring solution which collects metrics from `/metrics` HTTP endpoints made available by applications, as well as so-called exporters to serve services and host information in the specified metrics format. 
One example is CI/CD pipeline insights to analyse bottlenecks and [make your pipelines more efficient](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html). The [GitLab CI Pipeline Exporter project](https://github.com/mvisonneau/gitlab-ci-pipelines-exporter/tree/main/examples/quickstart) has a great quick start in under 5 minutes, bringing up demo setup with Docker-compose, Prometheus and Grafana. From there, it is not far into your production monitoring environment, and monitoring more of GitLab. \n\n![GitLab CI Exporter](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_ci_pipeline_exporter_prometheus.png)\nExample dashboard for the GitLab CI Pipeline Exporter\n{: .note.text-center}\n\nThe Prometheus Exporter uses the [Go client libraries](https://prometheus.io/docs/instrumenting/writing_exporters/). They can be used to write your own exporter, or instrument your application code to expose `/metrics`. When deployed, you can use Prometheus again to monitor the performance of your applications in Kubernetes, as one example. Find more monitoring ideas in my talk “[From Monitoring to Observability: Left Shift your SLOs](https://docs.google.com/presentation/d/1LPb-HPMgbc8_l98VjMEo5d0uYlnNnAtJSURngZPWDdE/edit)”. \n\n## Protect\n\nYou can enable security features in GitLab by including the CI/CD templates one by one. A more easy way is to enable [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) and use the default best practices for [security scans](https://docs.gitlab.com/ee/user/application_security/index.html#security-scanning-with-auto-devops). This includes [container scanning](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-container-scanning) ensuring that application deployments are not vulnerable on the container OS level. \n\nLet’s try a quick example with a potentially vulnerable image, and the Docker template tip from the Package stage above. 
Create a new `Dockerfile` in a new project:\n\n```yaml\nFROM debian:10.0 \n```\n\nOpen the pipeline editor and add the following CI/CD configuration:\n\n```yaml\n# 1. Automatically build the Docker image\n# 2. Run container scanning. https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html\n# 3. Inspect `Security & Compliance > Security Dashboard`\n\n# For demo purposes, scan the latest tagged image from 'main'\nvariables:\n    DOCKER_IMAGE: $CI_REGISTRY_IMAGE:latest    \n\ninclude:\n    - template: Docker.gitlab-ci.yml\n    - template: Security/Container-Scanning.gitlab-ci.yml\n```\n\nThe full example is located in [this project](https://gitlab.com/gitlab-de/playground/container-scanning-10y-example).\n\nTip: Learn more about [scanning container images in a deployed Kubernetes cluster](https://docs.gitlab.com/ee/user/application_security/container_scanning/) to stay even more safe. \n\n![Container Scanning Vulnerability Report](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_container_scanning_vulnerability_report.png)\nView the container scanning vulnerability report\n{: .note.text-center}\n\n## What’s next?\n\nWe have tried to find a great “hack” for each stage of the DevOps lifecycle. 
There are more hacks and hidden gems inside GitLab - share yours and be ready to explore more stages of the DevOps Platform.\n\nCover image by [Alin Andersen](https://unsplash.com/photos/diUGN5N5Rrs) on [Unsplash](https://unsplash.com)\n",[978,9,683],{"slug":8121,"featured":6,"template":686},"top-10-gitlab-hacks","content:en-us:blog:top-10-gitlab-hacks.yml","Top 10 Gitlab Hacks","en-us/blog/top-10-gitlab-hacks.yml","en-us/blog/top-10-gitlab-hacks",{"_path":8127,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8128,"content":8134,"config":8139,"_id":8141,"_type":14,"title":8142,"_source":16,"_file":8143,"_stem":8144,"_extension":19},"/en-us/blog/top-10-technical-articles-of-2022",{"title":8129,"description":8130,"ogTitle":8129,"ogDescription":8130,"noIndex":6,"ogImage":8131,"ogUrl":8132,"ogSiteName":670,"ogType":671,"canonicalUrls":8132,"schema":8133},"Top 10 technical articles of 2022","Let’s review our fantastic year of how-to guides. From fixing failed pipelines to making the best use of GitOps, we have you covered with our in-depth tutorials.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663736/Blog/Hero%20Images/a-deep-dive-into-the-security-analyst-persona.jpg","https://about.gitlab.com/blog/top-10-technical-articles-of-2022","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 technical articles of 2022\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-12-08\",\n      }",{"title":8129,"description":8130,"authors":8135,"heroImage":8131,"date":8136,"body":8137,"category":791,"tags":8138},[851],"2022-12-08","\nWith 2022 coming to a close, we wanted to ensure everyone gets one more chance to explore our top 10 technical blog posts of the year. Roll up your sleeves and enjoy our most-viewed how-to articles and don’t forget to bookmark them for next year!\n\n## 1. Failed pipeline? 
\n\nWe have *all* been there, and not much is more frustrating than that red X. Staff Developer Evangelist [Brendan O’Leary](/company/team/#brendan) offers his best advice on troubleshooting the “why?” of a GitLab failed pipeline – it starts with keeping the right perspective. So many factors are involved in code development that it’s critical to ask all of the questions: Is it the code? Is it the test? Is it a vulnerability, etc.?\n\n[How to troubleshoot a GitLab pipeline failure](/blog/how-to-troubleshoot-a-gitlab-pipeline-failure/)\n\n## 2. Why Git Rebase is your BFF\n\nWith code review increasingly important to successful DevOps, Senior Backend Engineer (Gitaly) [Christian Couder](/company/team/#chriscool) thinks devs might be forgetting a secret weapon in their IDE: Git Rebase. Learn how to rework commits with Git Rebase, including expert tips to try different instructions like ‘reword’, ‘edit’, and ‘squash’.\n\n[Take advantage of Git Rebase](/blog/take-advantage-of-git-rebase/)\n\n## 3. Alert fatigue is real\n\nFollow along with Senior Site Reliability Engineer [Steve Azzopardi](/company/team/#steveazz) as he lays out a GitLab investigation into annoying, time-consuming (and customer-facing) 502 errors in the GitLab Pages logs. To uncover the problem, Azzopardi and team had to unearth some red herrings along the way, but ultimately discovered the importance of PID 1 in a container.\n\n[How we reduced 502 errors by caring about PID 1 in containers](/blog/how-we-removed-all-502-errors-by-caring-about-pid-1-in-kubernetes/)\n\n## 4. More pipelines = less complexity\n\nCI/CD is at the heart of most modern DevOps practices, but that doesn’t mean it’s a “set it and forget it.” Staff Backend Engineer Fabio Pittino acknowledges the complexity challenges of CI/CD and suggests the solution is choosing the right pipelines for the job. 
Understand the differences between parent-child and multi-project pipelines to streamline your CI/CD efforts.\n\n[Breaking down CI/CD complexity with parent-child and multi-project pipelines](/blog/parent-child-vs-multi-project-pipelines/)\n\n## 5. Hacking and bug bounties\n\nHow did a Swedish web developer go from zero to number seven on our HackerOne Top 10 list in just over a year? Johan Carlsson offers a detailed look at how and why he started looking for bugs in GitLab in his spare time, and how others can jump into hacking, too.\n\n[Want to start hacking? Here’s how to quickly dive in](/blog/cracking-our-bug-bounty-top-10/)\n\n## 6. Gitlab… on an iPad\n\nYes, you can code on an M1-chip-based iPad, and Staff Developer Evangelist Brendan O’Leary walks through all the necessary steps to get GitLab running using GitPod.\n\n[How to code, build, and deploy from an iPad using GitLab and GitPod](/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod/)\n\n## 7. Speed up database changes\n\nMany DevOps teams have mastered speedy application code changes but have struggled to make database updates equally streamlined. In this step-by-step guide, you’ll learn how to apply DevOps principles to database change management.\n\n[How to bring DevOps to the database with GitLab and Liquibase](/blog/how-to-bring-devops-to-the-database-with-gitlab-and-liquibase/)\n\n## 8. A primer on IaC security\n\nInfrastructure as Code (IaC) is an increasingly popular solution for DevOps teams, and with good reason: It’s an efficient and low-resource solution. But, as Senior Developer Evangelist [Michael Friedrich](/company/team/#dnsmichi) explains, it’s also ripe with potential security vulnerabilities. 
Friedrich takes an exhaustive look at the threats, tools, integrations, and strategies that make IaC a safer choice.\n\n[Fantastic Infrastructure as Code security attacks and how to find them](/blog/fantastic-infrastructure-as-code-security-attacks-and-how-to-find-them/)\n\n## 9. Everything you need to know about GitOps \n\nWant to know how to make GitLab work with GitOps? Senior Product Manager (Configure) [Viktor Nagy](/company/team/#nagyv-gitlab) created an eight-part tutorial covering everything GitLab and GitOps, culminating in how to make a GitLab agent for Kubernetes self-managing. \n\n[The ultimate guide to GitOps with GitLab](/blog/the-ultimate-guide-to-gitops-with-gitlab/)\n\n## 10. The skinny on static site generators\n\nDevs will get the most out of GitLab Pages by choosing the right static site generator (SSG). Developer Evangelist [Fatima Sarah Khalid](/company/team/#sugaroverflow) reviews six options and has also created a toolkit to help make the SSG evaluation process easier.\n\n[How to choose the right static site generator](/blog/comparing-static-site-generators/)\n\n",[9,978,1789],{"slug":8140,"featured":6,"template":686},"top-10-technical-articles-of-2022","content:en-us:blog:top-10-technical-articles-of-2022.yml","Top 10 Technical Articles Of 2022","en-us/blog/top-10-technical-articles-of-2022.yml","en-us/blog/top-10-technical-articles-of-2022",{"_path":8146,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8147,"content":8152,"config":8157,"_id":8159,"_type":14,"title":8160,"_source":16,"_file":8161,"_stem":8162,"_extension":19},"/en-us/blog/top-10-ways-machine-learning-may-help-devops",{"title":8148,"description":8149,"ogTitle":8148,"ogDescription":8149,"noIndex":6,"ogImage":7876,"ogUrl":8150,"ogSiteName":670,"ogType":671,"canonicalUrls":8150,"schema":8151},"Top 10 ways machine learning may help DevOps","Is machine learning part of your DevOps plan? 
Here are some ways ML could fit right in.","https://about.gitlab.com/blog/top-10-ways-machine-learning-may-help-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 ways machine learning may help DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-02-14\",\n      }",{"title":8148,"description":8149,"authors":8153,"heroImage":7876,"date":8154,"body":8155,"category":679,"tags":8156},[745],"2022-02-14","_This post is meant as a general introduction to DevOps and machine learning, but does not represent GitLab’s roadmap with ModelOps. Read more about [our ModelOps plans](/blog/introducing-modelops-to-solve-data-science-challenges/)._\n\nLike a superhero’s cape, machine learning can enhance the innate powers of your DevOps program. \n\nYes, it’s early days, and no, machine learning can’t do everything you may want it to – yet. But if you [start using ML tools now](/topics/devops/the-role-of-ai-in-devops/), you’ll be poised to make it a full-fledged participant in your DevOps team as the technology continues to mature. Here are some things ML can help with today.\n\n1. **Make sense of your test data.** Whether it’s regression, unit, functional, or user acceptance testing, ML can help sort through the data generated from those tests, find patterns, figure out the coding problems that caused any bugs, and alert the troops. \n\n2. **Manage your help-desk alerts.** You can teach ML about the factors that make up different types of alerts and automatically route alerts to the best-qualified (mostly human) problem-solver, be it the service desk or a networking guru. Some ML systems can also fix problems without human intervention, based on rules you create.\n\n3. 
**Put the security into “DevSecOps.”** ML algorithms can, in real time, look through the massive amount of information generated from your security software and network logs and determine if there’s a breach long before a human could. The ML software compares the usual network-traffic baseline to what it’s seeing currently and detects when there’s an attack, or it can tell you if the amount of code in an app or system has suddenly grown to double its size when it shouldn’t have. ML can also triage the problems it finds, as well as take actions to correct security issues based on your guidelines. Further, ML tools can also help ensure your governance rules are followed and create a detailed audit trail.\n\n4. **Gather user requirements.** Natural language processing has come a long way, and can collect, validate, and track documents to streamline the process of figuring out what users are asking for. The technology can also help detect incomplete requirements or wonky timelines and can translate user wants and needs into highly technical project requirements. This makes the entire project-management process more efficient.\n\n5. **Help with pesky dev details.** No, not to replace developers, of course – at least not yet. But ML can learn from past apps you’ve created to recommend security guardrails and how to make software scale and perform better, among other things. Developers definitely see this trend coming, and in [GitLab’s 2021 Global DevSecOps Survey](/developer-survey/), around a third said that an understanding of AI or ML is the most important skill for their future careers. ML-powered code completion tools are already on the market, which provide suggestions for app developers.\n\n6. **Automate testing and create test data.** ML can automatically create the tests you need for QA and the test cases they’re based on, generate and manage test data, and automate code reviews. 
Natural language processing can help you review test cases and eliminate duplicates, as well as identify gaps in test coverage. Teams will continue to use machine learning models to [make test automation smarter](https://www.forrester.com/blogs/predictions-2021-software-developers-face-mounting-pressure/) , Forrester Research predicts.\n\n7. **Reduce complexity and allow better communication throughout the software chain.** ML can smooth out the rough edges among teams responsible for different parts of the process, and act as an Esperanto of sorts to allow people to speak to each other using the same language. No more, “It worked on my machine.” \n\n8. **Save time on manual provisioning.** Sure the cloud makes this easier, but ML can provision what it thinks you’ll need before you actually need it. \n\n9. **Improve software and product quality.** ML can help find issues like resource leaks, wasted CPU cycles, and other problems, so you can optimize your code before it hits production. At Facebook, [a bug detection tool](https://www2.deloitte.com/us/en/insights/focus/signals-for-strategists/ai-assisted-software-development.html/#:~:text=AI%20is%20helping%20to%20make%20better%20software%20Professionals%20are%20using,in%20design,%20development,%20and%20deployment&text=Artificial%20intelligence%20isn't%20writing,develop%20and%20test%20custom%20software.) predicts defects and suggests remedies that prove correct 80% of the time, Deloitte reports. And the IEEE ran a study from Google X about an ML method that [predicts failures of individual components](https://ieeexplore.ieee.org/document/7448033) that was “far more accurate than the traditional MTBF approach.” \n\n10. **Integrate your workflows and allow continuous improvement.** Some DevOps teams are using ML to analyze all development, operational, and test tools to find any gaps, as well as where pieces of the pipeline need to be better integrated and where APIs are still needed. 
ML algorithms can help teams figure out why some projects go very well, and others don’t. You can use ML to monitor your monitors and make sure they’re fully operational. Further, ML continues to learn from its training models – both the ones you provide and those it learns on its own as it goes – to continue to help you provide better products and services over time. And when you get down to it, isn’t that the whole point of technology?\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._",[9,771,681,1181],{"slug":8158,"featured":6,"template":686},"top-10-ways-machine-learning-may-help-devops","content:en-us:blog:top-10-ways-machine-learning-may-help-devops.yml","Top 10 Ways Machine Learning May Help Devops","en-us/blog/top-10-ways-machine-learning-may-help-devops.yml","en-us/blog/top-10-ways-machine-learning-may-help-devops",{"_path":8164,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8165,"content":8171,"config":8176,"_id":8178,"_type":14,"title":8179,"_source":16,"_file":8180,"_stem":8181,"_extension":19},"/en-us/blog/top-five-takeaways-from-the-developer-survey",{"title":8166,"description":8167,"ogTitle":8166,"ogDescription":8167,"noIndex":6,"ogImage":8168,"ogUrl":8169,"ogSiteName":670,"ogType":671,"canonicalUrls":8169,"schema":8170},"Top 5 takeaways from the 2018 Developer Survey","GitLab's director of product marketing discusses the challenges facing DevOps adoption and other key findings from our 2018 Developer Survey.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680105/Blog/Hero%20Images/top-five-takeaways-blog-image.jpg","https://about.gitlab.com/blog/top-five-takeaways-from-the-developer-survey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 5 takeaways from the 2018 Developer Survey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka 
Flowers\"}],\n        \"datePublished\": \"2018-05-14\",\n      }",{"title":8166,"description":8167,"authors":8172,"heroImage":8168,"date":8173,"body":8174,"category":679,"tags":8175},[3485],"2018-05-14","\n_Our [2022 Global DevSecOps Survey](/developer-survey/) has the latest insights from over 5,000 DevOps professionals._\n\nWhile the merits of cross-functional workflows are becoming more accepted in the software development space, it still has quite a way to go. In fact, [GitLab’s survey of 5,000 software professionals](/developer-survey/previous/2018/) found that only 23 percent of respondents are working with a DevOps workflow.\n\nThis is one of five top takeaways from the annual report on software development trends and the impact continuous integration and automation have on the way IT teams work.\n\n- [What’s in the webcast](#whats-in-the-webcast)\n- [Watch the recording](#watch-the-recording)\n- [Top takeaways](#top-takeaways)\n\n## What’s in the webcast\n\nThe discussion kicks off with the differing outlooks managers and developers have on DevOps adoption and the source of bottlenecks in the development process. We move on to highlight the distinctions between high- and low-performing teams and the role open source tools have in software development. The discussion then delves into the way continuous integration helps teams get working code out of the door faster.\n\n## Watch the recording\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/7hgoeV6LcFo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Top takeaways\n\n### Managers are more optimistic about their DevOps adoption progress than developers\n\n>Companies tend to look at DevOps as the next transformational methodology that's going to solve all software delivery problems, and of course there's a lot of truth in that when done really well. 
What we're finding is when you actually go and survey these organizations, managers and the management layer seem to have a more optimistic view of how they are progressing and what they can do with it. And though developers find the promise in it, they tend to agree less with the optimism of management. From our perspective, that makes a lot of sense because developers are in the trenches tooling, retooling, trying to configure, making that CD pipeline work, always kind of running into different roadblocks and trying to solve that all the time. So, although they're excited, I think their viewpoint is not necessarily as rosy about it when compared to management.\n\n### Developers say most delays in the development process are in the testing phase, while managers say the majority of bottlenecks are attributed to the planning process\n\n>Everybody acknowledges that there are bottlenecks and delays in this development pipeline. When doing DevOps, you still get stuck. But where they actually encounter these delays and bottlenecks varied from team to team. The majority of this was in testing, the next one was planning. Development, operations, and practitioner teams actually found most of the bottlenecks and delays in their actual phases of work, whether this was testing the plan to production, etcetera. Management was found to be more frustrated and concerned about the planning phase of getting things kick started. - Ashish Kuthiala\n\n>Fifty-two percent of people say that testing is where they encounter the most delays. I don't think that's a number to be taken lightly. This is why continuous testing, automated testing is such a big piece of the DevOps software development lifecycle. If that's the single biggest cause for delay and we can automate more of that testing, the time it takes has got to come down. 
- Alan Shimel\n\n### Open source tools play an integral role in the software development process\n\n>We're finding that open source tools are becoming a very critical component that developers choose to help solve their problems. People are starting to look at tools that they can integrate with their stack and modify or contribute to; and they want to be recognized as well. So they're starting to turn to tools that are malleable, tools that they can use and understand what's underneath the hood. There's a good community around open source because as developers face problems, they can ask their peers for help and also help others. - Ashish Kuthiala\n\n### Teams that self-identify as high performing do DevOps well\n\n>Teams that move fast work on smaller pieces of code and get them out of production quickly, i.e. they do DevOps well and they assess themselves as higher performing teams ...\nFor these teams that do well, we found that removing roadblocks in the development process starts with continuous integration. If you are doing continuous integration well and automating that portion of the lifecycle along with others, it makes a huge impact in removing bottlenecks. You have to ship and get the code or the configuration change production ready right away. The more you wait, the more it piles it up and the harder it becomes. 
- Ashish Kuthiala\n\nPhoto by [Caspar Rubin](https://unsplash.com/photos/fPkvU7RDmCo) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,681,999,683,749],{"slug":8177,"featured":6,"template":686},"top-five-takeaways-from-the-developer-survey","content:en-us:blog:top-five-takeaways-from-the-developer-survey.yml","Top Five Takeaways From The Developer Survey","en-us/blog/top-five-takeaways-from-the-developer-survey.yml","en-us/blog/top-five-takeaways-from-the-developer-survey",{"_path":8183,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8184,"content":8189,"config":8193,"_id":8195,"_type":14,"title":8196,"_source":16,"_file":8197,"_stem":8198,"_extension":19},"/en-us/blog/top-reasons-for-software-release-delays",{"title":8185,"description":8186,"ogTitle":8185,"ogDescription":8186,"noIndex":6,"ogImage":5535,"ogUrl":8187,"ogSiteName":670,"ogType":671,"canonicalUrls":8187,"schema":8188},"Top reasons for software release delays","In our 2022 Global DevSecOps survey, DevOps pros shared their frustrations with software releases, including security's shift left and complicated code reviews.","https://about.gitlab.com/blog/top-reasons-for-software-release-delays","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top reasons for software release delays\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-08-30\",\n      }",{"title":8185,"description":8186,"authors":8190,"heroImage":5535,"date":2940,"body":8191,"category":769,"tags":8192},[851],"\n_What’s the most likely reason for a software release delay?_\n\nFrom 2019 through 2021, respondents to our Global DevSecOps Surveys _always_ blamed software testing. 
This year, however, was dramatically different.\n\nMore than 5,000 DevOps practitioners took our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/), and, for the first time, they offered five equally valid reasons why releases might be tardy: code development, code review, security analysis, test data management, and, of course, testing. \n\nProcesses and priorities are clearly changing in DevOps teams today, and they’re affecting release delays. Here’s how to understand the forces at work.\n\n> Join us at [GitLab Commit 2022](/events/commit/) and connect with the ideas, technologies, and people that are driving DevOps and digital transformation.\n\n## Code development and code review\n\nOver the past three years, code development and code review were the second- and third-ranked culprits for release delays. That’s to be expected: No one ever said code development was easy and code reviews have always been problematic.\n\nDevelopers report [a myriad of challenges with code review](/blog/the-code-review-struggle-is-real-heres-what-you-need-to-know/): It’s too labor intensive, no one is available to do it, and the culture often doesn’t support the process. But in this year’s survey, 76% of developers said they find code reviews “very” or “somewhat” valuable, and a majority said code review was one of the key steps in DevOps they wish they could do more of. All told, 27% of developers review code weekly while another 21% review it daily or with every commit.\n\nClearly, code review is important but [it takes work](/blog/tips-for-better-code-review/) to make them happen more efficiently. One up-and-coming solution that could help make code reviews easier is artificial intelligence. Our survey found 31% of DevOps teams use AI for code review today, more than double the percentage in 2021. 
GitLab is also excited about the possibilities found in AI’s close cousin machine learning – we’re using it to [improve the code review process](/blog/the-road-to-smarter-code-reviewer-recommendations/). \n\n## Keeping software secure\n\nCreating safe code requires security testing and the frustration around this step is both real and longstanding. Security has nearly always [been seen as a “blocker”](/blog/developer-security-divide/) when it comes to software development in general and software releases in particular. In our 2022 survey, though, priorities have changed. Security is now the top area DevOps teams plan to invest in this year, and a majority of developers report that the most difficult part of their job is keeping software secure. Here’s just a sample of what developers had to say about the challenges of their roles today: \n\n_We are trying to keep up with the latest tools and security for optimal performance and privacy._\n\n_We are trying to build applications that are secure and stable._\n\n_It is challenging to keep it secure and keep it updated._\n\n_Cyber security attacks are the biggest challenge facing us today._\n\n_Data security, data security, I repeat, data security._\n\nThe focus on security isn’t just talk, either. More than 50% of DevOps teams are running SAST, DAST, and container scans, all dramatic increases from 2021. But at the same time, this is the fourth year security pros have continued to blame developers for finding too few bugs too late in the process. 
Security is a developer performance metric for many teams, but sec team members say it is still very hard to get devs to actually fix bugs, a trend we’ve seen reflected over and over.\n\nIn other words, it’s complicated enough to make the potential of delays unsurprising.\n\n## Managing the test data\n\nToo much test data is one of those good and bad problems to have: 47% of DevOps teams we surveyed report full test automation, nearly double the percentage from last year, and more security scans are being run too. More than half of survey takers (53%) are testing their code as it’s being written, up 21% from last year.\n\nAll those tests result in a data management problem most teams aren’t actually set up to handle. Here’s one example: Less than one-third of teams are able to put DAST and SAST results into a developer’s workflow/IDE and those percentages remain stubbornly low year after year. \n\nTesting momentum and automation are growing by leaps and bounds, but teams now need better ways to evaluate, communicate, and act on the data.\n\n## The tricky nature of software testing\n\nSoftware testing has often worn the “DevOps scapegoat” mantle, and perhaps for good reason. Getting testing just right is critical, but it’s also elusive. There are so many kinds of tests teams can run, test automation requires a big process and culture investment, and test results are often seen as “flaky,” “noisy,” and “late” by busy developers not enthused about context switching or inaccurate results. \n\nBut there are a couple of promising signs: As we saw in 2021, developer respondents told us again this year that testing is high on their list of tasks they would like to do more of. And artificial intelligence is also making inroads: About 37% of teams are using AI/ML to test their code (a 23-point jump from 2021) and 20% more are planning to add it to their DevOps practice this year.\n\nWant to understand more about software release delays and DevOps best practices? 
Read our [2022 Global DevSecOps Survey](/developer-survey/).\n",[681,771,3993,9],{"slug":8194,"featured":6,"template":686},"top-reasons-for-software-release-delays","content:en-us:blog:top-reasons-for-software-release-delays.yml","Top Reasons For Software Release Delays","en-us/blog/top-reasons-for-software-release-delays.yml","en-us/blog/top-reasons-for-software-release-delays",{"_path":8200,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8201,"content":8206,"config":8212,"_id":8214,"_type":14,"title":8215,"_source":16,"_file":8216,"_stem":8217,"_extension":19},"/en-us/blog/top-ten-reasons-to-check-out-gitlab-virtual-commit",{"title":8202,"description":8203,"ogTitle":8202,"ogDescription":8203,"noIndex":6,"ogImage":4635,"ogUrl":8204,"ogSiteName":670,"ogType":671,"canonicalUrls":8204,"schema":8205},"Top Ten Reasons to Check Out GitLab's Virtual Commit","An overview of GitLab's Virtual Commit and the content available specific to public sector.","https://about.gitlab.com/blog/top-ten-reasons-to-check-out-gitlab-virtual-commit","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top Ten Reasons to Check Out GitLab's Virtual Commit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jim Riley\"}],\n        \"datePublished\": \"2020-09-14\",\n      }",{"title":8202,"description":8203,"authors":8207,"heroImage":4635,"date":8209,"body":8210,"category":1359,"tags":8211},[8208],"Jim Riley","2020-09-14","\n \n{::options parse_block_html=\"true\" /}\n\n \nThis year the GitLab crew stepped away from everything they knew about creating an amazing, winning conference and reworked the Commit vision to better fit in line with the needs of our changed world. The result was an incredible digital experience. Commit transformed into a 24-hour full conference program filled with practical DevOps strategies shared by leaders in development, operations, and security. Why 24-hours? 
GitLab has customers, partners and contacts all across the globe and the Commit team saw the virtual environment as an opportunity to make certain everyone had access to all the exciting, featured content and our GitLab team in real time.\n \nGitLab customers and partners shared real world examples of how GitLab is helping their organizations innovate, survive, and succeed @ speed. [Login](https://gitlabcommitvirtual.com/) to view the top ten presentations that showcase how Public Sector is leading digital transformation through GitLab.\n \n - Nicolas Chaillan, Chief Software Officer, US Air Force, United States Air Force and his keynote talk  “DevSecOps in Government and Highly Regulated Industries” \n - How The U.S. Army Cyber School Created ‘Courseware-as-Code’ With GitLab \n - Deployment & Adoption of GitLab in Government \n - DevSecOps At The Brazilian Federal Public Ministry...Exclusively With Open Source Tools \n - DevOps 101: Getting to Minimal Viable 'DevOpsness' \n - Scaling DevOps at the NSA \n - Accelerating Speed to Mission Through Low-to-High Cross Domain Collaboration \n - Enabling the Tactical Edge Through DevSecOps in a Box \n - Cloud-Native Security: Processes And Tools To Protect Modern Applications \n - DevOps 101: Getting to Minimal Viable 'DevOpsness \n \nAfter absorbing the presentations shared at Commit, if you’re finding you’d like to dive a little deeper and explore a bit more, [reach out to us](https://about.gitlab.com/company/contact/) and we’ll be happy to connect with you and keep the conversation going!\n \nTo learn more about GitLab Public Sector, please visit: https://about.gitlab.com/solutions/public-sector/\n",[9,2243,875,1477],{"slug":8213,"featured":6,"template":686},"top-ten-reasons-to-check-out-gitlab-virtual-commit","content:en-us:blog:top-ten-reasons-to-check-out-gitlab-virtual-commit.yml","Top Ten Reasons To Check Out Gitlab Virtual 
Commit","en-us/blog/top-ten-reasons-to-check-out-gitlab-virtual-commit.yml","en-us/blog/top-ten-reasons-to-check-out-gitlab-virtual-commit",{"_path":8219,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8220,"content":8226,"config":8231,"_id":8233,"_type":14,"title":8234,"_source":16,"_file":8235,"_stem":8236,"_extension":19},"/en-us/blog/trends-in-test-automation",{"title":8221,"description":8222,"ogTitle":8221,"ogDescription":8222,"noIndex":6,"ogImage":8223,"ogUrl":8224,"ogSiteName":670,"ogType":671,"canonicalUrls":8224,"schema":8225},"3 Trends in test automation","Faster deployments, fewer bugs, better user experiences – see the latest trends in test automation and what they're bringing to the table.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663662/Blog/Hero%20Images/trends-in-test-automation.jpg","https://about.gitlab.com/blog/trends-in-test-automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Trends in test automation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-05-01\",\n      }",{"title":8221,"description":8222,"authors":8227,"heroImage":8223,"date":8228,"body":8229,"category":679,"tags":8230},[788],"2019-05-01","\nAutomation is becoming a powerful tool in every industry.\nWith the pace of development at breakneck speed, [test automation](/topics/devops/devops-test-automation/) is a big asset in deploying applications quickly.\nThe volume and complexity of testing environments mean that machines are well-suited for the job, and a modern QA strategy is all about leveraging that automation effectively.\n\n[QASymphony recently surveyed testers and QA leaders](https://www.qasymphony.com/blog/test-automation-trends-infographic/) at mid-size and large enterprises and found that a significant number of respondents expect to be making a big leap towards test automation in the 
next year:\nAlmost half expect to be automating more than 50 percent in that time.\nThe test automation tool landscape is growing more complex, and 83 percent of organizations are using open source tools.\n\n## 1. Continuous testing\n\nIn traditional environments, testing gets completed at the end of a development cycle.\nAs more teams move toward a [DevOps](/topics/devops/) and [continuous delivery](/topics/ci-cd/) model in which software is constantly in development, leaving testing until the end can be a huge liability.\nIn the time between a project starting and going to testing, master files could have been changed thousands of times.\nWho knows what kinds of bugs can pop up over months of development?\nThis leads to either updates stuck in testing for far too long or deployments filled with bugs – neither of which is good.\nThat’s where continuous testing comes in.\n\nContinuous testing starts at the beginning.\nEach milestone along the way serves as a quality gate, [baking in excellence at each stage of the software development process](https://techbeacon.com/app-dev-testing/state-test-automation-7-key-trends-watch).\nAs each phase clears, more testing happens as needed.\nImplementing continuous testing methodologies is _already_ the biggest trend in test automation, but some organizations that embark on their DevOps journeys struggle with it.\n\nSubu Baskaran, senior product manager for Sencha, says that despite the desire to test early in the cycle, software development teams that are still maintaining legacy applications find it hard to go back and write unit or end-to-end tests:\n\n>\"The millions of lines of code make it extremely difficult for teams to think about unit testing, as that will severely hamper new feature development. Also, legacy applications have inherent complexities that make end-to-end testing very slow, vague, and brittle. 
[Hence, teams that maintain legacy applications resort to manual testing.](https://techbeacon.com/app-dev-testing/state-continuous-testing-its-journey-not-destination)\"\n\n## 2. Concurrent DevOps\n\nCode quality and speed go hand in hand, and teams must be able to make use of parallelization to keep up the pace.\nSplitting work across multiple servers has never been easier, and organizations will continue to expand their concurrent DevOps approach.\n\nYou could have multiple physical machines to handle the load but [VMs can be a more economical option for automation parallelization](https://techbeacon.com/app-dev-testing/parallelizing-test-automation-read-first).\nWhether those VMs are on premises or cloud-based largely depends on the cost and your company's ability to embrace the cloud.\n\nYou could also work with cloud partners, companies that host cloud-based execution environments\nfor testing and automation.\n\nAutoscaling is one way that teams can reduce the costs associated with running these concurrent jobs.\n[Autoscaling runners](/releases/2016/03/29/gitlab-runner-1-1-released/) split this work across multiple servers and spin up or down automatically to process queues – so developers don’t have to wait on builds and teams use as much capacity as needed.\nThis user [built out a CI testing pipeline using GitLab](https://medium.freecodecamp.org/4-steps-to-build-an-automated-testing-pipeline-with-gitlab-ci-24ccab95535e) that allowed for more effective bug catching, and more DevOps teams will be using these methods to automate their testing environments in years to come.\n\n## 3. 
AI and machine learning\n\nAt its core, machine learning is a pattern-recognition technology, [the main purpose of which is to make machines learn without being explicitly programmed](https://hackernoon.com/why-ai-ml-will-shake-software-testing-up-in-2019-b3f86a30bcfa).\nWhat makes this such an important trend in test automation is that it can make testing more predictive and reliable.\nWhile Selenium is still the standard for creating testing scripts, it requires a high level of programming skill to maintain.\nAutomation tools like Mabl, [TestCraft](https://www.testcraft.io/), Testim.io, and AutonomiQ are just some of the few incorporating AI and machine learning into test automation.\n\nDan Belcher, co-founder of testing tool company Mabl, and his team [developed an ML testing algorithm that can adapt to changes in frontend elements](https://techbeacon.com/app-dev-testing/how-ai-changing-test-automation-5-examples).\n\"Although Selenium is the most broadly used framework, the challenge with it is that it's pretty rigidly tied to the specific elements on the front end. Because of this, script flakiness can often arise when you make what seems like a pretty innocent change to a UI.\" he explains.\n\"One of the things that we did at the very beginning of creating Mabl was to develop a much smarter way of referring to frontend elements in our test automation so that those types of changes don't actually break your tests.\"\n\nAI and machine learning make it possible to go through millions of lines of code and identify patterns.\nBut what happens to the human testers? 
QA automation means that testers can devote more time to superior user experiences – the tasks that machines are _not_ always well-suited for.\nThe role of testers is now [ensuring that quality testing processes are being followed](https://www.qasymphony.com/blog/managing-testing-teams/), so it’s more about oversight than conducting actual tests.\nModern QA can be that bridge for beautiful user experiences that are intuitive and appealing.\nWith the volume of applications being deployed every day, having a great user experience is a way to stand out in a sea of apps.\n\n## These trends in test automation are just the tip of the iceberg\n\nThere is no shortage of exciting things happening: more focus on JavaScript testing, improvements in testing across devices, comprehensive testing dashboards, as well as Selenium-free options.\nThe testing automation landscape is full of new solutions, but none of them is viable in an outdated legacy environment.\n\nManual testing reduces application development speed and threatens code quality.\nThese two disadvantages are growth killers, especially in such a competitive development landscape.\nTest automation makes it possible for testers to use their skills where they add more business value: Creating great user experiences.\nLegacy applications can’t tap into all of these test automation capabilities because they aren’t supported.\nOrganizations forced to manually test their code are being left in the dust by those who automate.\n\nThe advantage of using a solution like GitLab is that we can incorporate a variety of continuous testing solutions.\nCustomers have integrated us with SaaS-based testing solutions or even their own homegrown Selenium grids.\nWe also integrate with JavaScript platforms like Cypress.io, and help teams create continuous integration pipelines.\n\nAre you ready to explore these trends in test automation but legacy applications are holding you back?\n\n[Just 
commit.](/blog/application-modernization-best-practices/)\n{: .alert .alert-gitlab-purple .text-center}\n\nCover image by [Mimi Thian](https://unsplash.com/photos/ZKBzlifgkgw?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/%22developers%22?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,109,683],{"slug":8232,"featured":6,"template":686},"trends-in-test-automation","content:en-us:blog:trends-in-test-automation.yml","Trends In Test Automation","en-us/blog/trends-in-test-automation.yml","en-us/blog/trends-in-test-automation",{"_path":8238,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8239,"content":8245,"config":8251,"_id":8253,"_type":14,"title":8254,"_source":16,"_file":8255,"_stem":8256,"_extension":19},"/en-us/blog/tutorial-automated-release-and-release-notes-with-gitlab",{"title":8240,"description":8241,"ogTitle":8240,"ogDescription":8241,"noIndex":6,"ogImage":8242,"ogUrl":8243,"ogSiteName":670,"ogType":671,"canonicalUrls":8243,"schema":8244},"Tutorial: Automate releases and release notes with GitLab","With the GitLab Changelog API, you can automate the generation of release artifacts, release notes, and a comprehensive changelog detailing all user-centric software modifications.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659978/Blog/Hero%20Images/automation.png","https://about.gitlab.com/blog/tutorial-automated-release-and-release-notes-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Automate releases and release notes with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ben Ridley\"}],\n        \"datePublished\": \"2023-11-01\",\n      }",{"title":8240,"description":8241,"authors":8246,"heroImage":8242,"date":5865,"body":8248,"category":1180,"tags":8249,"updatedDate":8250},[8247],"Ben Ridley","***2025 
update** - The Changelog API has continued to evolve and now has some great new capabilities we don’t cover in this blog, such as the ability to provide custom changelogs with templated values from your commit history. [Discover more in the official Changelogs docs.](https://docs.gitlab.com/user/project/changelogs/)*\n\nWhen you develop software that users rely on, effective communication about changes with each release is essential. By keeping users informed about new features and any modifications or removals, you ensure they maximize the software's benefits and avoid encountering unpleasant surprises during upgrades.\n\nHistorically, creating release notes and maintaining a changelog has been a laborious task, requiring developers to monitor changes externally or release managers to sift through merge histories. With the GitLab Changelog API, you can use the rich history provided in our git repository to easily create release notes and maintain a changelog.\n\nIn this tutorial, we'll delve into automating releases with GitLab, covering the generation of release artifacts, release notes, and a comprehensive changelog detailing all user-centric software modifications.\n\n## Releases in GitLab\nFirst, let's explore how releases work in GitLab.\n\nIn GitLab, a release is a specific version of your code, identified by a git tag, that includes details about changes since the last release (and release notes) and any related artifacts built from that version of the code, such as Docker images, installation packages, and documentation.\n\nYou can create and track releases in GitLab using the UI by calling our Release API or by defining a special `release` job inside a CI pipeline. In this tutorial, we'll use the `release` job in a CI/CD pipeline, which allows us to extend the automation we're using in our pipelines for testing, code scanning, etc. 
to also perform automated releases.\n\nTo automate our releases, we first need to answer this question: Where are we going to get the information on changes made for our release notes and our changelog? The answer: Our git repository, which provides us with a rich history of development activity through commit messages and merge commit history. Let's see if we can leverage this rich history to automatically create our notes and changelogs.\n\n## Introducing commit trailers\n[Commit trailers](https://git-scm.com/docs/git-interpret-trailers) are structured entries in your git commits, created by adding simple `\u003CHEADER>:\u003CBODY>` format messages to the end of your commit. The `git` CLI tool can then parse and extract these for use in other systems. An example you might have already used is `git commit --sign-off` to sign off on a commit. This is implemented by adding a `Signed-off-by: \u003CYour Name>` trailer to the commit. We can add any arbitrary structured data here, which makes it a great place to store information that could be useful for our changelog.\n\nIn fact, if we use a `Changelog: \u003Cadded/changed/removed>` trailer in our commits, the GitLab Changelog API will parse these and use them to create a changelog for us automatically!\n\nLet's see this in action by making some changes to a real codebase and performing a release, and generating release notes and changelog entries.\n\n## Our example project\nFor the purposes of this blog, I'm using a simple Python web app repository. Let's pretend Version 1.0.0 of the application was just released and is the current version of the code. 
I've also created a 1.0.0 release in GitLab, which I did manually because we haven't created our automated release pipeline yet:\n\n![A screenshot of the GitLab UI showing a release for Version 1.0.0](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/1-0-release.png)\n\n## Making our changes\nWe're in rapid development mode, so we're going to be working on releasing Version 2.0.0 of our application today. As part of our 2.0.0 release, we're going to be adding a new feature to our app: A chatbot! And we're also going to be removing the quantum blockchain feature, because we only needed that for our first venture capital funding round. Also, we're going to be adding an automated release job to our CI/CD pipeline for our 2.0.0 release.\n\nFirst, let's remove unneeded features. I've created a merge request that contains the necessary removals. Importantly, we need to ensure we have a commit message that includes the `Changelog: removed` trailer. There's a few ways to do this, such as including it directly in a commit, or performing an interactive rebase and adding it using the CLI. But I think the easiest way in our situation is to leave it until the end and then use the `Edit commit message` button in GitLab to add the trailer to the merge commit like so:\n\n![A screenshot the GitLab UI showing a merge request removing unused features](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/remove-unused-features-mr.png)\n\nIf you use this method, you can also change the merge commit title to something more succinct. I've changed the title of my merge commit to 'Remove Unused Features', as this is what will appear in the changelog entry.\n\nNext, let's add some new functionality for the 2.0.0 release. 
Again, all we need to do is open another merge request that includes our new features and then edit the merge commit to include the `Changelog: added` trailer and edit the commit title to be more succinct:\n\n![A screenshot of the GitLab UI showing a merge request to add new functionality](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/add-chatbot-mr.png)\n\nNow we're pretty much ready to release 2.0.0. But we don't want to create our release manually this time. So before our release we're going to add some jobs to our `.gitlab-ci.yml` file that will perform the release for us automatically, and generate the respective release notes and changelog entries, when we tag our code with a new version like `2.0.0`.\n\n**Note:** If you want to enforce changelog trailers, consider using something like [Danger to perform automated checks for MR conventions](https://docs.gitlab.com/ee/development/dangerbot.html).\n\n## Building an automated release pipeline\nFor our pipeline to work, we need to create a project access token that will allow us to call GitLab's API to generate changelog entries. [Create a project access token with the API scope](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#create-a-project-access-token), and then [store the token as a CI/CD variable](https://docs.gitlab.com/ee/ci/variables/#define-a-cicd-variable-in-the-ui) called `CI_API_TOKEN`. 
We'll reference this variable to authenticate to the API.\n\nNext, we're going to add two new jobs to our `gitlab-ci.yml` file:\n```yaml\nprepare_job:\n  stage: prepare\n  image: alpine:latest\n  rules:\n  - if: '$CI_COMMIT_TAG =~ /^v?\\d+\\.\\d+\\.\\d+$/'\n  script:\n    - apk add curl jq\n    - 'curl -H \"PRIVATE-TOKEN: $CI_API_TOKEN\" \"$CI_API_V4_URL/projects/$CI_PROJECT_ID/repository/changelog?version=$CI_COMMIT_TAG\" | jq -r .notes > release_notes.md'\n  artifacts:\n    paths:\n    - release_notes.md\n\nrelease_job:\n  stage: release\n  image: registry.gitlab.com/gitlab-org/release-cli:latest\n  needs:\n    - job: prepare_job\n      artifacts: true\n  rules:\n  - if: '$CI_COMMIT_TAG =~ /^v?\\d+\\.\\d+\\.\\d+$/'\n  script:\n    - echo \"Creating release\"\n  release:\n    name: 'Release $CI_COMMIT_TAG'\n    description: release_notes.md\n    tag_name: '$CI_COMMIT_TAG'\n    ref: '$CI_COMMIT_SHA'\n    assets:\n      links:\n        - name: 'Container Image $CI_COMMIT_TAG'\n          url: \"https://$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA\"\n```\n\nIn the above configuration, the `prepare_job` uses `curl` and `jq` to call the GitLab Changelog API endpoint and then passes this to our `release_job` to actually create the release. To break it down further:\n- We use the project access token created earlier to call the GitLab Changelog API, which performs the generation of the release notes and we store this as an artifact.\n- We're using the `$CI_COMMIT_TAG` variable as the version. For this to work, we need to be using semantic versioning for our tags (something like `2.0.0` for example), so you'll notice I've also restricted the release job using a `rules` section that checks for a semantic version tag.\n\t- Semantic versioning is required for the GitLab Changelog API to work. It uses this format to find the most recent release to compare to our current release.\n- We use the official `release-cli` image from GitLab. 
The release-cli is required to use the `release` keyword in a job.\n- We use the `release` keyword to create a release in GitLab. This is a special job keyword reserved for creating a release and populating the required fields.\n- We can pass a file as an argument to the `description` of the release. In our case, it's the file we generated in the `prepare_job`, which was passed to this job as an artifact.\n- We've also included our container image that is being built earlier in the pipeline as a release asset. You can attach any assets you'd like from your build process, such as binaries or documentation by providing a URL to wherever you've uploaded them earlier in the pipeline.\n\n## Performing an automated release\nWith this setup, all we need to do to perform a release is push a tag to our repository that follows our versioning scheme. You can simply push a tag using the CLI, this example uses GitLab's UI to create a tag on the main branch. Create a tag by selecting Code -> Tags -> New Tag on the sidebar:\n![A screenshot of the GitLab UI illustrating how to create a tag](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/create-2-tag.png)\n\nOn creation, our pipelines will start to execute. The GitLab Changelog API will automatically generate release notes for us as markdown, which contains all the changes between this release and the previous release. 
Here's the resulting markdown generated in our example:\n\n```md\n## 2.0.0 (2023-08-25)\n\n### added (1 change)\n\n- [Add ChatBot](gl-demo-ultimate-bridley/super-devsecops-incorporated/simply-notes-release-demo@0c3601a45af617c5481322bfce4d71db1f911b02) ([merge request](gl-demo-ultimate-bridley/super-devsecops-incorporated/simply-notes-release-demo!4))\n\n### removed (1 change)\n\n- [Remove Unused Features](gl-demo-ultimate-bridley/super-devsecops-incorporated/simply-notes-release-demo@463d453c5ae0f4fc611ea969e5442e3298bf0d8a) ([merge request](gl-demo-ultimate-bridley/super-devsecops-incorporated/simply-notes-release-demo!3))\n```\n\nAs you can see, GitLab has extracted the entries for our release notes automatically using our git commit trailers. In addition, it's helpfully provided links back to the merge request so readers can see more details and discussion around the changes.\n\nAnd now, our final release:\n![The GitLab release UI showing a release for version 2.0.0](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/2-0-release.png)\n\n## Creating the changelog\nNext, we want to update our changelog (which is basically a collated history of all your release notes). You can use a `POST` request to the changelog API endpoint we used earlier to do this.\n\nYou can do this as part of your release pipeline if you like, for example by adding this to the `script` section of your prepare job:\n```sh\n'curl -H \"PRIVATE-TOKEN: $CI_API_TOKEN\" -X POST \"$CI_API_V4_URL/projects/$CI_PROJECT_ID/repository/changelog?version=$CI_COMMIT_TAG\"\n```\n\n**Note that this will actually modify the repository.** It will create a commit to add the latest notes to a `CHANGELOG.md` file:\n![A screenshot of the repository which shows a commit updating the changelog file](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/changelog-api-commit.png)\n\nAnd we are done! 
By utilizing the rich history provided by `git` with some handy commit trailers, we can leverage GitLab's powerful API and CI/CD pipelines to automate our release process and generate release notes for us.\n\n> If you’d like to explore the project we used for this article, [you can find the project at this link](https://gitlab.com/gitlab-learn-labs/sample-projects/release-automation-demo).\n",[978,976,109,9,2243,1789],"2025-06-05",{"slug":8252,"featured":6,"template":686},"tutorial-automated-release-and-release-notes-with-gitlab","content:en-us:blog:tutorial-automated-release-and-release-notes-with-gitlab.yml","Tutorial Automated Release And Release Notes With Gitlab","en-us/blog/tutorial-automated-release-and-release-notes-with-gitlab.yml","en-us/blog/tutorial-automated-release-and-release-notes-with-gitlab",{"_path":8258,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8259,"content":8264,"config":8269,"_id":8271,"_type":14,"title":8272,"_source":16,"_file":8273,"_stem":8274,"_extension":19},"/en-us/blog/ubs-gitlab-devops-platform",{"title":8260,"description":8261,"ogTitle":8260,"ogDescription":8261,"noIndex":6,"ogImage":5897,"ogUrl":8262,"ogSiteName":670,"ogType":671,"canonicalUrls":8262,"schema":8263},"How UBS created their own DevOps platform using GitLab","How GitLab helped power more than a million builds in six months on UBS DevCloud.","https://about.gitlab.com/blog/ubs-gitlab-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How UBS created their own DevOps platform using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2021-08-04\",\n      }",{"title":8260,"description":8261,"authors":8265,"heroImage":5897,"date":8266,"body":8267,"category":791,"tags":8268},[2002],"2021-08-04","\n\nUBS, the largest truly global wealth manager, uses GitLab to power DevCloud, a single [DevOps 
platform](/solutions/devops-platform/) that allows for a cloud-based, service-oriented, software development lifecycle.\n\n\"GitLab is a fundamental part of DevCloud,\" said [Rick Carey](https://www.bloomberg.com/profile/person/20946258), Group Chief Technology Officer at UBS. \"We wouldn't be able to have that seamless experience without GitLab. It allowed us to pull ahead of many of our competitors, and break down the barriers between coding, testing, and deployment.\"\n\nDuring GitLab Virtual Commit 2021, Rick and [Eric Johnson](/company/team/#edjdev), Chief Technology Officer at GitLab, talked about how building DevCloud on GitLab's DevOps Platform allowed UBS to increase their development velocity, lower their infrastructure costs, and increase collaboration between engineers and non-engineering teams worldwide.\n\n## How engineers used DevCloud to collaborate during UBS Hackathon\n\nThe annual [UBS Hackathon](https://www.ubs.com/global/en/our-firm/what-we-do/technology/2020/hackathon-2020.html), which typically brings together engineers from around the world in one room, went virtual in 2020 due to the COVID-19 pandemic. UBS did a soft launch of the DevCloud platform during the 2020 Hackathon to have a truly global development and seamless team experience among the more than 500 participants dispersed worldwide.\n\n\"It was hard to pick a winner, because nearly every program and team built something absolutely incredible in such a short amount of time,\" said Rick. 
\"They got so much done that even while chatting with each other, they said, 'I can't believe how easy it is to get this done.'\n\nOnce this Hackathon was successful, we knew that we were going to be able to migrate the rest of our engineers to DevCloud.\"\n\n## Open source collaboration benefitted UBS and GitLab\n\n\"I must say it's uncommon in my experience to see such a large organization let alone one in such a compliance-driven industry as finance take on such a large project and deliver it on time,\" Eric said.\n\nRick attributes part of that success to GitLab's commitment to open source collaboration, which allowed UBS to turn to GitLab team members with questions.\n\n\"In an open source model, every time there was a gap, or an issue, or something we just needed your help with, we could reach out to GitLab and say, 'Can we work on this together? Is there a way to improve this?'\", said Rick. \"That's the value, and that's one of the reasons we went with GitLab.\"\n\nIt wasn't a one-way relationship. Eric said that GitLab learned a lot about compliance and risk processes that are unique to the financial sector by collaborating on open source projects with UBS.\n\n\"Collaboration is one of the GitLab's core values – which was key to this project. We set common goals. We're in constant communication, and we're always working together to remove roadblocks. 
Working with UBS's engineers is a truly agile experience,\" said Eric.\n\nGitLab forums have a lot of contributions from UBS team members, and both UBS and GitLab are members of open source communities such as the Fintech Open Source Foundation (FINOS) and Cloud Native Computing Foundation (CNCF).\n\n## How adopting DevCloud paid off for UBS\n\nOne of the key messages for why adopting a single DevOps platform such as GitLab or DevCloud benefits engineering teams is the productivity pay-off – for engineers and non-engineers alike.\n\nSimilar to GitLab, which enables simple asynchronous collaboration between team members, DevCloud was built with engineers in mind but so everyone can contribute. Rick said that one of the best pieces of feedback he got on DevCloud was from someone on the business side of UBS, who wanted to do some development projects but struggled with other tools.\n\n\"He said, 'Oh, that's DevCloud? I love DevCloud,'\" said Rick.\n\nIn the roughly six months since UBS launched DevCloud, there have been more than 12,000 users and more than one million successful builds.\n\n## What's next?\n\nIn June 2021, [GitLab acquired machine learning company UnReview](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities.html) which has allowed us to improve our machine learning capabilities as part of our DevOps Platform. Eric said that by practicing applied machine learning, specifically for code review, GitLab should be able to balance review workloads across teams to increase efficiency.\n\nKeeping all the DevOps activities in a single application makes it easier to extract insights throughout the software development lifecycle. By adding machine learning to a DevOps Platform such as GitLab or DevCloud, teams can not only derive data from past activities, but start to predict the future.\n\n \"We were very impressed by UBS's development culture,\" said Eric. 
\"It is very complimentary to our own, and we look forward to our continued partnership.\"\n\n## More of a video person?\n\nThis conversation was part of GitLab Virtual Commit 2021. Watch the video below to see the full conversation between Eric and Rick.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Tof-7fDultw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[9,682,976,749],{"slug":8270,"featured":6,"template":686},"ubs-gitlab-devops-platform","content:en-us:blog:ubs-gitlab-devops-platform.yml","Ubs Gitlab Devops Platform","en-us/blog/ubs-gitlab-devops-platform.yml","en-us/blog/ubs-gitlab-devops-platform",{"_path":8276,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8277,"content":8283,"config":8288,"_id":8290,"_type":14,"title":8291,"_source":16,"_file":8292,"_stem":8293,"_extension":19},"/en-us/blog/ultimate-perks-for-open-source-projects",{"title":8278,"description":8279,"ogTitle":8278,"ogDescription":8279,"noIndex":6,"ogImage":8280,"ogUrl":8281,"ogSiteName":670,"ogType":671,"canonicalUrls":8281,"schema":8282},"Public open source projects are eligible for Ultimate tier features","GitLab's Open Source Program offers top-tier functionality and 50,000 CI pipeline minutes, for free. 
Learn more about applying.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667457/Blog/Hero%20Images/open_source_program_blog_image.jpg","https://about.gitlab.com/blog/ultimate-perks-for-open-source-projects","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Public open source projects are eligible for Ultimate tier features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2022-02-04\",\n      }",{"title":8278,"description":8279,"authors":8284,"heroImage":8280,"date":8285,"body":8286,"category":726,"tags":8287},[2219],"2022-02-04","\n> **2022-07-28 UPDATE:** As of 2022-07-01, all public open source projects on the Free tier have to apply to the Open Source program to continue receiving GitLab Ultimate benefits. In-app notifications informing impacted users of the change were active from 2022-04-18 through 2022-07-01. Please refer to the [FAQ](/pricing/faq-efficient-free-tier/#public-projects-on-gitlab-saas-free-tier) for more details.\n\nGitLab believes in a world where everyone can contribute and we like to support those who share our mission.\n\nGitLab exists today in large part thanks to the work of hundreds of thousands of open source contributors around the world. To give back to this community, the [GitLab for Open Source Program](/solutions/open-source/) was created to help open source teams be more efficient, secure, and productive by allowing them to use GitLab’s [top tier](/pricing/) capabilities.\n\nOpen source organizations have to meet the program requirements and actively apply in order to qualify. 
\n\nRequirements include:\n\n* **OSI-approved open source license**: All of the code you host in this GitLab group must be published under [OSI-approved open source licenses](https://opensource.org/licenses/category)\n* **Not seeking profit**: Your organization must not seek to make a profit through services or by charging for higher tiers. Accepting donations to sustain your efforts is ok. [Read more about this requirement here](/handbook/marketing/developer-relations/community-programs/opensource-program/#who-qualifies-for-the-gitlab-for-open-source-program).\n* Publicly visible: Your GitLab.com group or self-managed instance and your source code must be [publicly visible and publicly available](https://docs.gitlab.com/ee/user/public_access.html).\n\n[Learn more and apply](/solutions/open-source/join/) to the GitLab for Open Source Program.\n\n**Note**: Newly created public projects will no longer automatically receive the Ultimate tier benefits as of 2022-02-17, more details in [this FAQ entry](/pricing/faq-efficient-free-tier/#public-projects-on-gitlab-saas-free-tier). In order to receive the benefits, you will need to [apply](#how-to-apply) to the GitLab for Open Source Program.\n\n## Why apply to the GitLab for Open Source Program?\n\nThe GitLab for Open Source Program gives access to unlimited seats per license to features of [GitLab Ultimate](/pricing/ultimate/) (SaaS or Self-Managed), including 50,000 CI/CD minutes, for free to qualifying open source projects. GitLab Ultimate includes features that allow organization-wide security, compliance and planning. 
Some key features include:\n\n- [Multi-level Epics](https://docs.gitlab.com/ee/user/group/epics/)\n- [Portfolio-level Roadmaps](https://docs.gitlab.com/ee/user/group/roadmap/)\n- [Requirements Management](/direction/plan/#requirements-management)\n- [Compliance pipeline configuration](https://docs.gitlab.com/ee/user/project/settings/#compliance-pipeline-configuration)\n- [Chain of custody report](https://docs.gitlab.com/ee/user/compliance/compliance_report/#chain-of-custody-report)\n- [Vulnerability Database](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#contributing-to-the-vulnerability-database)\n- [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\n- and much more...\n\nTo see a complete list of features, see our\n[Feature Comparison between tiers](/pricing/feature-comparison/) page. \n\n* Free top-tier accounts do not include support. However, you can purchase [support](/support/) for 95% off, at $4.95 per user per month.\n* Your program membership needs to be renewed annually. If you do not renew, [your account will be downgraded](/pricing/licensing-faq/#what-happens-when-my-subscription-is-about-to-expire-or-has-expired).\n* Acceptance into the GitLab for Open Source Program is at GitLab’s sole discretion, and we reserve the right to terminate the Program, or change the [Program requirements](/solutions/open-source/join/#requirements) at any time.\n\nIf you have any additional questions regarding this program, feel free to reach us at [opensource@gitlab.com](mailto:opensource@gitlab.com).\n\n## How to apply\n\n1. Set up a GitLab account. You’ll need to have a GitLab group already set up for your open source project or organization through a Free account or Free Trial. For help setting up a GitLab group, please see the relevant [FAQ](/solutions/open-source/join/#faqs). If you're considering a migration, you do not need to have finished the migration before applying to our program. 
You will need at least one project (repo) set up under your GitLab group to comply with our requirements.\n1. Take screenshots. During the application process, you’ll need to provide 3 screenshots of your project. We suggest taking them in advance, since you’ll need to submit them on page two of the application form.\n1. Fill out the [form](/solutions/open-source/join/#application) and submit it. \n\nApplications are reviewed and a response to your request can be expected within 10 business days. You may be asked to provide additional information.\n\nCover image by [Clay Banks](https://unsplash.com/photos/N3SsG7xR-Dg) on [Unsplash](https://unsplash.com)\n{: .note}\n",[682,1515,9],{"slug":8289,"featured":6,"template":686},"ultimate-perks-for-open-source-projects","content:en-us:blog:ultimate-perks-for-open-source-projects.yml","Ultimate Perks For Open Source Projects","en-us/blog/ultimate-perks-for-open-source-projects.yml","en-us/blog/ultimate-perks-for-open-source-projects",{"_path":8295,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8296,"content":8302,"config":8308,"_id":8310,"_type":14,"title":8311,"_source":16,"_file":8312,"_stem":8313,"_extension":19},"/en-us/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review",{"title":8297,"description":8298,"ogTitle":8297,"ogDescription":8298,"noIndex":6,"ogImage":8299,"ogUrl":8300,"ogSiteName":670,"ogType":671,"canonicalUrls":8300,"schema":8301},"GitLab transforms code review with machine learning tools","Learn how last year's acquisition has resulted in impactful features for the One DevOps Platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668002/Blog/Hero%20Images/pg-gear.jpg","https://about.gitlab.com/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"UnReview a year later: How GitLab is 
transforming DevOps code review with ML-powered functionality\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-06-02\",\n      }",{"title":8303,"description":8298,"authors":8304,"heroImage":8299,"date":8305,"body":8306,"category":769,"tags":8307},"UnReview a year later: How GitLab is transforming DevOps code review with ML-powered functionality",[2862],"2022-06-02","\n\nA little over a year ago, [GitLab acquired UnReview](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities.html), a machine learning-based solution for automatically identifying [relevant code reviewers](/stages-devops-lifecycle/create/) and distributing review workloads and knowledge. Our goal is to integrate UnReview’s ML-powered code review features throughout GitLab, the One DevOps Platform. We checked in with Taylor McCaslin, principal product manager, ModelOps, at GitLab, to find out the impact UnReview has had so far and what comes next.\n\n**The idea of applying machine learning to code review was already underway at GitLab before the UnReview acquisition. What was it about ML/AI and automation that seemed a good fit for the code review process? How did the UnReview acquisition affect that strategy?**\n\nThe acquisition of UnReview gave GitLab a practical way to get started with a really focused value proposition that was obvious to the platform. ML/AI is a lot more than just having a useful algorithm. UnReview and its team gave GitLab talent with experience building MLOps pipelines and working with production DataOps workflows. As a source code management ([SCM](/solutions/source-code-management/)) and continuous integration ([CI](/topics/ci-cd/)) platform, MLOps and DataOps are key ambitions for our ModelOps stage. 
UnReview is the foundational anchor of our AI Assisted group, and we anticipate developing more ML-powered features with the base that we’ve built integrating UnReview into our One DevOps platform. If it’s something you manually set today within GitLab, we’ll consider suggestions and automations: suggested labels, assignees, issue relationships, etc. You can learn more about our plans on our [AI Assisted direction page](/direction/modelops/ai_assisted/).\n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n**There were [three specific objectives with the UnReview project](/handbook/engineering/development/data-science/ai-assisted/projects/unreview/#overview) when you first started:**\n- **Eliminate the time wasted manually searching for an appropriate code reviewer to review code changes.**\n- **Make optimum recommendations that consider the reviewers’ experience and optimize the review load across the team, which additionally facilitates knowledge sharing.**\n- **Provide analytics on the state of code review in the project, explaining why a particular code reviewer is recommended.**\n\n**Have you had to change or add to these in any way?**\n\nWe now have Suggested Reviewers running for external beta customers as well as dogfooding it internally. We’ve learned a lot about what makes a good code reviewer. Some of the obvious things like context with the changed files and history of committing to that area of code are obvious. But there are less obvious things like what type of code someone has experience with (front-end or back-end).\n\nWe’re finding the concept of recency interesting: the idea that people who more recently interacted with files and functions may be better suited to review the code. 
Also, people leave companies, and that’s usually not something that can be inferred by the source graph, so we’re working on merging additional GitLab activity data with the recommendation engine.\n\nIn addition, we’re thinking a lot about bias in our recommendations. For example, a senior engineer likely has the most commits across a project, but we don’t always want to recommend a senior engineer. The more we work with the algorithm and recommendations, the more nuanced we find it.\n\nNot every organization does code review the same way, so we’re considering building different models for those that have no process versus organizations that have very rigid and hierarchical reviewer requirements. We also have to consider how recommendations interact with other features of the platform like code owners, maintainer roles, and commit access.\n\nWe’ve never been more excited about the potential of machine learning within GitLab. Some of the feedback we’ve had from beta customers are “this feels like magic” and that honestly encapsulates what we’re going for. Sometimes the right code reviewer is just a feeling that you can’t quite put your finger on. Through data and a little bit of magic, we may see Suggested Reviewers help speed up workflows, and cut down on back and forth and wasted time trying to find someone to do a great review of your code.\n\n**Introducing ML-powered features can come with challenges, especially being GitLab’s first data science feature. Can you speak to some of those challenges and how the team overcame them?**\n\nIt has been about a year since we closed the transaction. During that time period we’ve introduced a lot of new concepts to GitLab. Access to real-time data within the feature with DataOps extraction and cleaning of platform activity data. 
We have an end-to-end MLOps pipeline running 100% within GitLab CI that extracts, builds, trains, and deploys the UnReview model, and new observability metrics to know if the whole system is working. These are all foundational concepts that we’ve had to build from the ground up.\n\nAlso, we’ve introduced Python to the GitLab tech stack and have to develop new engineering standards and hiring interview practices to find the right talent for this team. We’re now turning the corner of this foundational work and I anticipate that relatively soon we’ll release Suggested Reviewers fully integrated with the platform and UI.\n\nMilestones have been part of the way we’ve sliced up the integration work. We have a variety of internal milestones we’ve been tracking against, including porting the model into GitLab SCM and CI, building the Dataops and MLOps pipelines, and internal and external customer betas. It’s helpful to have these milestones to know what’s most important at any given time and not to get overwhelmed with all the moving pieces. We’re paving a new path with ML-powered features at GitLab, and once we’re done we’ll have a repeatable process and template to replicate over and over with new data science-powered features.\n\n**What has been the most surprising thing you’ve encountered or learned since UnReview first debuted?**\n\nCode Reviewers are foundational to the software development lifecycle. We thought this would be a really straightforward feature, but it turns out people REALLY care about recommendations. People hate bad suggestions so when the recommendations are wrong, the feedback is fast and furious. But when it’s right, it feels like magic. That really surprised me how positively people respond to a great suggestion.\n\nA lot of GitLab users have asked me what our success metric is for Suggested Reviewers. It should just feel like magic. Maybe you don’t know why someone was chosen, but you just feel they were the right person to review the change. 
And hopefully that leads to a more thoughtful code review, reduces the back and forth of trying to find someone to review your code, and ultimately creates a better experience end-to-end. A lot of engineers dread code reviews; we want to change that. I hope Suggested Reviewers can take the pain out of the experience and make it something engineers look forward to. That’s the feeling we’re trying to create with our recommendations. Obvious but magic.\n\n**What’s next for UnReview specifically and DevOps code review more generally? Where do you see the next big advances happening?**\n\nWe’re just scratching the surface. There are so many opportunities for recommendations and automations across the platform. We have a lot of data at GitLab, from the source graph, contribution history, CI builds, test logs, security scans, and deployment data. We believe all of this can be integrated together. I’m particularly excited about what we’re calling [Intelligent Code Security](/direction/modelops/ai_assisted/#categories). The idea is that we will be able to look at your source code as you’re writing it, analyze it for security vulnerabilities, and not only suggest fixes to common security flaws, but also apply that change, run your CI, confirm the build succeeds, confirm the vulnerability was resolved, and possibly even deploy that change, all automatically.\n\nImagine the future where your code gets more secure automatically while you sleep. That sounds wild, but we have the data to power [a feature like this in the future](/direction/modelops/ai_assisted/#categories). Suggested Reviewers is just the beginning. We haven’t seen many DevOps platforms fully embrace the data, code, and activity data that they have in a material way. 
I think we’ll see a lot more in this space moving forward as development platforms identify the massive opportunities to drive efficiencies and remove the frustrating parts of software development from the process.\n",[9,771,976,231,1181],{"slug":8309,"featured":6,"template":686},"unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review","content:en-us:blog:unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review.yml","Unreview A Year Later How Gitlab Is Being Transformed By Ml Powered Code Review","en-us/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review.yml","en-us/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review",{"_path":8315,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8316,"content":8322,"config":8328,"_id":8330,"_type":14,"title":8331,"_source":16,"_file":8332,"_stem":8333,"_extension":19},"/en-us/blog/upgrading-database-os",{"title":8317,"description":8318,"ogTitle":8317,"ogDescription":8318,"noIndex":6,"ogImage":8319,"ogUrl":8320,"ogSiteName":670,"ogType":671,"canonicalUrls":8320,"schema":8321},"We are upgrading the operating system on our Postgres database clusters","Learn when these upgrades will happen and how they will help boost performance and reliability on GitLab.com.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669673/Blog/Hero%20Images/engineering.png","https://about.gitlab.com/blog/upgrading-database-os","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We are upgrading the operating system on our Postgres database clusters\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David Smith\"}],\n        \"datePublished\": \"2022-08-12\",\n      }",{"title":8317,"description":8318,"authors":8323,"heroImage":8319,"date":8325,"body":8326,"category":791,"tags":8327},[8324],"David Smith","2022-08-12","\nContinuing on the 
theme of [improving the performance and reliability of GitLab.com](/blog/path-to-decomposing-gitlab-database-part1/), we have another step we will be taking for our clusters of Postgres database nodes. These nodes have been running on Ubuntu 16.04 with extended security maintenance patches and it is now time to get them to a more current version. Usually, this kind of upgrade is a behind-the-scenes event, but there is an underlying technicality that will require us to take a maintenance window to do the upgrade (more on that [below](#the-challenge)).\n\nWe have been preparing for and [practicing this upgrade](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/7577) and are now ready to schedule the window to do this work for GitLab.com.\n\n## When will the OS upgrade take place and what does this mean for users of GitLab.com?\n\nThis change is planned to take place on 2022-09-03 (Saturday) between 11:00 UTC and 14:00 UTC. The implementation of this change is anticipated to include a **service downtime of up to 180 minutes** (see [reference issue](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/7543)). During this time you will experience complete service disruption of GitLab.com.\n\nWe are taking downtime to ensure that the application works as expected following the OS upgrade and to minimize the risk of any data integrity issues.\n\n> Join us at [GitLab Commit 2022](/events/commit/) and connect with the ideas, technologies, and people that are driving DevOps and digital transformation.\n\n## Background\n\nGitLab.com's [database architecture](/handbook/engineering/infrastructure/production/architecture/#database-architecture) uses two Patroni/Postgres database clusters: main and CI. We recently did functional decomposition and now the CI Cluster stores the data generated by CI GitLab features. Each Patroni cluster has primary and multiple read-only replicas. 
For each of the Patroni clusters, the Postgres database size is ~18 TB running on Ubuntu 16.04. During the scheduled change window, we will be switching over to our newly built Ubuntu 20.04 clusters.\n\n## The challenge\n\nUbuntu 18.10 introduced an updated version of glibc (2.28), which includes a [major update to locale data](https://wiki.postgresql.org/wiki/Locale_data_changes) and causes Postgres indexes created with earlier versions of glibc to be corrupted. Because we are upgrading to Ubuntu 20.04, our indexes are affected by this. Therefore, during the downtime window scheduled for this work, we need to detect potentially corrupt indexes and have them reindexed before we enable production traffic again. We currently have the following types and the approximate number of indexes:\n\n```\n Index Type | # of Indexes\n------------+--------------\n btree      |         4079\n gin        |          101\n gist       |            3\n hash       |            1\n```\n\nAs you can appreciate, given the sheer number (and size) of these indexes, it would take far too long to reindex every single index during the scheduled downtime window, so we need to streamline the process.\n\n## Options to upgrade to Ubuntu 20.04 safely\n\nThere are a number of ways to deal with the problem of potentially corrupt indexes:\n\na. Reindex **all** indexes during the scheduled downtime window\n\nb. Transport data to target 20.04 clusters in a logical (not binary) way, including:\n\n  - Backups/upgrades using pg_dump\n  - Logical replication\n\nc. 
Use streaming replication from 16.04 to 20.04 and during the downtime window, break replication and promote the 20.04 clusters followed by reindexing of potentially corrupt indexes\n\nIt might be feasible for a small to a medium-size Postgres implementation to use options a or b; however, at the GitLab.com scale, it would require a much larger downtime window and our aim is to reduce the impact to our customers as much as possible.\n\n## High-level approach for the OS upgrade\n\nTo perform an OS upgrade on our Patroni clusters, we use Postgres streaming replication to replicate data from our current Ubuntu 16.04 clusters to the brand new Ubuntu 20.04 standby Patroni clusters. During the scheduled downtime window, we will stop all traffic to the current 16.04 clusters, promote the 20.04 clusters by making them Primary and demote the Ubuntu 16.04 clusters by reconfiguring to act as Standby while replicating from the new 20.04 primaries. We will then reindex all the identified potentially corrupt indexes, and update DNS to point the application to the new 20.04 Patroni clusters before opening traffic to the public.\n\n## Identifying potentially corrupt indexes and our approach to handling the reindexing for different types of indexes\n\n### B-Tree\n\nWe use `bt_index_parent_check` [amcheck function](https://www.postgresql.org/docs/12/amcheck.html) to identify potentially corrupt indexes and we will reindex them during the downtime window.\n\n### GiST and Hash\n\nSince we do not have many GiST and Hash indexes, and reindexing them is a relatively quick operation, we will reindex them all during the downtime window.\n\n### GIN\n\nCurrently, the production version of amcheck is limited to detecting potential corruption in B-Tree indexes only. 
Our GIN indexes are reasonably sized and it would require a significant amount of time to reindex them during the scheduled downtime window, which is not feasible as we cannot have the site unavailable to our customers for that long. We have collaborated closely with our database team to produce a list of business-critical GIN indexes to be reindexed **during** the downtime window, and any other GIN indexes will be reindexed immediately after we open up traffic to the public using the [CONCURRENTLY](https://www.postgresql.org/docs/current/sql-reindex.html#SQL-REINDEX-CONCURRENTLY) option. Using this option means it will take longer to reindex, but it allows normal operations to continue while the indexes are being rebuilt.\n\n## Performance improvements\n\nWe started looking into options to improve the performance of the reindexing (see [reference issue](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15559#note_940517257)). There are a couple of areas where we needed to improve performance.\n\n### Identify potentially corrupt B-Tree indexes quickly\n\nWhen we first started using the amcheck to identify potentially corrupt indexes, it was single threaded so it was taking just under five days to run the amcheck script to identify potentially corrupt indexes on production data. After a few iterations, our amcheck script now runs a separate background worker process for each index, so we essentially get a performance improvement of about 96 times when we use a 96 CPU core VM to run amcheck. The performance is limited by the time it takes to run amcheck on the largest index. The script is customizable to skip or include a specific set of tables/indexes, and we can decide the number of parallel worker processes to use based on the number of CPU cores available on the VM we use to run amcheck. 
Now with the improved speed, we can run the amcheck script on a copy of production data a day or two before the scheduled OS upgrade downtime window.\n\n### Improve reindexing speed to reduce the downtime\n\nOur initial test to reindex was performed sequentially with the default Postgres parameters. We have tested reindexing with different Postgres parameters and parallelized the reindex process. We are now able to perform our reindexing in less than half the time it used to take to reindex.\n\n## Reading material\n\nFor more information, please see the following links:\n\n- [Ubuntu 20.04 Upgrade Epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/637)\n- [Research on the types of indexes and steps to identify corruption](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15384#note_867281334)\n",[726,1040,9],{"slug":8329,"featured":6,"template":686},"upgrading-database-os","content:en-us:blog:upgrading-database-os.yml","Upgrading Database Os","en-us/blog/upgrading-database-os.yml","en-us/blog/upgrading-database-os",{"_path":8335,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8336,"content":8341,"config":8346,"_id":8348,"_type":14,"title":8349,"_source":16,"_file":8350,"_stem":8351,"_extension":19},"/en-us/blog/use-inputs-in-includable-files",{"title":8337,"description":8338,"ogTitle":8337,"ogDescription":8338,"noIndex":6,"ogImage":3774,"ogUrl":8339,"ogSiteName":670,"ogType":671,"canonicalUrls":8339,"schema":8340},"Define input parameters to includable CI/CD configuration files","This is the first milestone of the long-term roadmap of the CI/CD Components Catalog roadmap.","https://about.gitlab.com/blog/use-inputs-in-includable-files","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Define input parameters to includable CI/CD configuration files\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": 
\"2023-05-08\",\n      }",{"title":8337,"description":8338,"authors":8342,"heroImage":3774,"date":8343,"body":8344,"category":791,"tags":8345},[2120],"2023-05-08","\nIn GitLab 15.11, we introduced an exciting new feature that allows users to [define input parameters for includable configuration files](/releases/2023/04/22/gitlab-15-11-released/#define-inputs-for-included-cicd-configuration). With the ability to use input parameters in [CI templates](https://docs.gitlab.com/ee/development/cicd/templates.html), you can replace any keyword in the template with a parameter, including stage, script, or job name. For example, you can add a prefix to all of the jobs to better isolate them from the pipeline into which you are including the configuration.\n\nThese input parameters can be declared as mandatory or optional for each configuration file, reducing the need for global variables and making your CI/CD templates more robust and isolated. The input parameters are scoped to the included configuration only, which means they have no impact on the rest of the pipeline. This allows you to declare and enforce constraints, for example by enforcing mandatory inputs for templates.\n\nThis development is the first milestone of the long-term roadmap of the [CI/CD Components Catalog](https://gitlab.com/groups/gitlab-org/-/epics/7462), a new feature that will allow users to search and reuse single-purpose CI/CD configuration units with specific parameters for their use case. If you want to learn more about this exciting new development, you can read our [blog post about our CI templates feature](/blog/how-to-build-reusable-ci-templates/).\n\nIn this technical blog post, we will provide step-by-step instructions on how to define CI/CD templates with input parameters and how to use them when including templates.\n\n## Step 1: Create a template YAML document\nThe first step is to create a template YAML document that describes what input arguments can be used with the template. 
The second part of the template is the definition of the jobs that may include references to values using the interpolation format `$[[ inputs.input-name ]]`. You should use three dash lines between the two parts.\n\nHere is an example of a deploy-template.yml:\n\n```yaml\nspec:\n  inputs:\n    website:\n    environment:\n      default: test\n---\ndeploy:\n  stage: deploy\n  script: echo \"deploy $[[ inputs.website ]] to $[[ inputs.environment ]]\"\n```\n\nIn this template, we have defined two input parameters: website and environment. The environment parameter has a default value. In the content section, we define a job that interpolates the input arguments.\n\n## Step 2: Include the template in the CI configuration\nIn your main CI configuration file `.gitlab-ci.yml`, include the template and add input parameters using the `inputs` keyword.\n\nHere is an example of including the `deploy-template.yml` with input parameters:\n\n```yaml\ninclude:\n  - local: deploy-template.yml\n    inputs:\n      website: my-website.example.com\n```\n\nIn this example, we included a local template in our project. 
Note: You can use `inputs` with the other [include types](https://docs.gitlab.com/ee/ci/yaml/index.html#include) such as `include:project`, `include:template`, `include:remote`.\n\nIn the below example, we use inputs to add a prefix to jobs name, and make the stage dynamic as well.\n\n```yaml\nspec:\n  inputs:\n    website:\n    environment:\n      default: staging\n    stage:\n      default: test\n    job_prefix:\n      default: \"\"\n---\n\"$[[ inputs.job_prefix ]]deploy\":\n  stage: $[[ inputs.stage ]]\n  script: echo \"deploy $[[ inputs.website ]] to $[[ inputs.environment ]]\"\n```\n\nThen we can include it from the `.gitlab-ci.yml` with the input parameters:\n\n```\ninclude:\n  - local: deploy-template.yml\n    inputs:\n      stage: deploy\n      website: http://example.com\n      environment: production\n      job_prefix: \"my-app-\"\n```\n\nYou can [fork](https://gitlab.com/tech-marketing/ci-interpolation-example) this project, which uses the above examples:\n\n- [Dynamic job](https://gitlab.com/tech-marketing/ci-interpolation-example/-/blob/main/dynamic-job.yml)\n- [Dynamic script](https://gitlab.com/tech-marketing/ci-interpolation-example/-/blob/main/deploy-template.yml)\n- [Main CI configuration](https://gitlab.com/tech-marketing/ci-interpolation-example/-/blob/main/.gitlab-ci.yml)\n\nFor more information, please use our [online documentation](https://docs.gitlab.com/ee/ci/yaml/includes.html#define-input-parameters-with-specinputs).\n\nThat's it! You have successfully created CI templates that accept inputs and used them in a pipeline configuration. 
By using templates with inputs, you can simplify pipeline configuration and make templates more modular and reusable.\n\nThank you to [Fabio Pitino](https://gitlab.com/fabiopitino) and [Grzegorz Bizon](https://gitlab.com/grzesiek) for their content reviews.",[978,9,109],{"slug":8347,"featured":6,"template":686},"use-inputs-in-includable-files","content:en-us:blog:use-inputs-in-includable-files.yml","Use Inputs In Includable Files","en-us/blog/use-inputs-in-includable-files.yml","en-us/blog/use-inputs-in-includable-files",{"_path":8353,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8354,"content":8359,"config":8365,"_id":8367,"_type":14,"title":8368,"_source":16,"_file":8369,"_stem":8370,"_extension":19},"/en-us/blog/use-streaming-audit-events-to-connect-your-technology-stack-with-gitlab-and-pipedream",{"title":8355,"description":8356,"ogTitle":8355,"ogDescription":8356,"noIndex":6,"ogImage":1449,"ogUrl":8357,"ogSiteName":670,"ogType":671,"canonicalUrls":8357,"schema":8358},"Streaming audit events: Connect GitLab to your tech stack","Automation lets your DevSecOps teams have logic in place for how to handle events as they come in.","https://about.gitlab.com/blog/use-streaming-audit-events-to-connect-your-technology-stack-with-gitlab-and-pipedream","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use Streaming Audit Events to connect your technology stack with GitLab and Pipedream\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam Kerr\"}],\n        \"datePublished\": \"2022-06-27\",\n      }",{"title":8360,"description":8356,"authors":8361,"heroImage":1449,"date":8362,"body":8363,"category":875,"tags":8364},"Use Streaming Audit Events to connect your technology stack with GitLab and Pipedream",[5251],"2022-06-27","\n\nGitlab recently released [Streaming Audit Events](https://docs.gitlab.com/ee/administration/audit_event_streaming.html) to provide you real-time 
visibility into what happens inside your GitLab groups and projects. Whenever something happens, an event will be sent to the HTTPS destination of your choice. This is a great way to understand immediately when something has changed and if there is an action that needs to be taken.\n\nThese events are often used to drive automation to update GitLab in response to certain actions, such as creating a new issue to onboard a team member when an account is added to a group, or to restore the correct value of a [merge request approval setting](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/settings.html) if it is changed. We know that many users want to combine the streaming audit events with other data sets and tools they already work with. Taking automatic action in response to audit events happening can help ensure your GitLab groups and projects are always in the correct, compliant state.\n\n## Pipedream simplifies the streaming audit event process\n\nDriving automation off of these events or combining the events with other data sets means the destination which will receive the events needs to be running and have logic in place for how to handle the events as they come in. This normally would require setting up and maintaining a server with high availability to receive events as they happen, run any automation scripts, and then process the events if they needed to be sent to another tool or combined with another data set. This is tricky to do right and an extra step that takes time. \n\nEnter our partner, [Pipedream](https://pipedream.com/). \n\nPipedream lets you connect APIs, remarkably fast. This includes the new streaming audit events from GitLab. 
When you select the GitLab New Audit Events trigger in a Pipedream workflow, Pipedream will automatically register an HTTPS endpoint for audit events in your GitLab group:\n\n![Pipedream registration process](https://about.gitlab.com/images/blogimages/pipedreamscreenshot.png){: .shadow}\n\nFrom there, Pipedream allows you to transform the data, forward it to any other tools using Pipedream’s [prebuilt actions](https://pipedream.com/docs/workflows/steps/actions/), or write any custom automation [with code](https://pipedream.com/docs/code/) (i.e., Node.js, Python, Go, or Bash).\n\n## Getting started with Pipedream and GitLab\n\nThe video below shows an example of how to use GitLab streaming audit events and Pipedream together to automatically alert your security team if a sensitive project setting is changed. This is powerful because it ensures that your security teams can immediately take action when a change occurs and understand why it happened.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ggzoUMEsjjU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThis is just one example of what you can do with Pipedream and GitLab together. Pipedream allows you to use any [GitLab API](https://docs.gitlab.com/ee/api/) in response to an audit event: You can change the setting to its original value, add comments to issues, kick off pipelines, and more. You can also trigger any action in any of the [700+ other apps](https://pipedream.com/apps) that it has built-in integrations with.\n\n## Open source integration means everyone can contribute\n\nPipedream and GitLab are both strong believers in open source. The integration is publicly available at the Pipedream [repository](https://github.com/PipedreamHQ/pipedream), and contributions are welcome! 
We are excited to see what sort of workflows you create with Pipedream and GitLab together.\n\n## Final thoughts\n\nIn this post, we talked about the power of GitLab’s [Streaming Audit Events](https://docs.gitlab.com/ee/administration/audit_event_streaming.html) to give you immediate visibility into your groups and projects and how Pipedream makes it easy to build and automate workflows based on those audit events. This was just a preview of what is possible though, as you can use the entire GitLab API within Pipedream in response to audit events or interact with other tools supported by Pipedream.\n\nWe are excited to see the workflows you build with GitLab and Pipedream together. We showed how you can create a GitLab issue to alert the security team when settings are changed, but the sky is the limit - you might also create issues when new user accounts are created to onboard new team members, automatically restore changed settings, or forward data to a security information and event management, a.k.a. SIEM, system. 
With Pipedream and Gitlab, you can automatically take the actions necessary when things change to ensure you remain secure and compliant.\n",[9,231,875],{"slug":8366,"featured":6,"template":686},"use-streaming-audit-events-to-connect-your-technology-stack-with-gitlab-and-pipedream","content:en-us:blog:use-streaming-audit-events-to-connect-your-technology-stack-with-gitlab-and-pipedream.yml","Use Streaming Audit Events To Connect Your Technology Stack With Gitlab And Pipedream","en-us/blog/use-streaming-audit-events-to-connect-your-technology-stack-with-gitlab-and-pipedream.yml","en-us/blog/use-streaming-audit-events-to-connect-your-technology-stack-with-gitlab-and-pipedream",{"_path":8372,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8373,"content":8379,"config":8384,"_id":8386,"_type":14,"title":8387,"_source":16,"_file":8388,"_stem":8389,"_extension":19},"/en-us/blog/use-waypoint-to-deploy-with-gitlab-cicd",{"title":8374,"description":8375,"ogTitle":8374,"ogDescription":8375,"noIndex":6,"ogImage":8376,"ogUrl":8377,"ogSiteName":670,"ogType":671,"canonicalUrls":8377,"schema":8378},"How to use HashiCorp Waypoint to deploy with GitLab CI/CD","Learn how to use Waypoint using GitLab CI/CD by following this step-by-step demo.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679260/Blog/Hero%20Images/using-hashicorp-waypoint-deploy-gitlab-cicd.jpg","https://about.gitlab.com/blog/use-waypoint-to-deploy-with-gitlab-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use HashiCorp Waypoint to deploy with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2020-10-15\",\n      }",{"title":8374,"description":8375,"authors":8380,"heroImage":8376,"date":8381,"body":8382,"category":726,"tags":8383},[766],"2020-10-15","\n\nHashiCorp announced a new project at [HashiConf 
Digital](https://hashiconf.com/) called [Waypoint](https://www.waypointproject.io/). \n\n## Hashicorp Waypoint\n\nHashicorp Waypoint uses an HCL based configuration file to describe how to build, deploy, and release applications to various cloud platforms, ranging from Kubernetes to AWS to Google Cloud Run. Think of Waypoint as if Terraform and Vagrant came together to describe how to build, deploy, and release your applications.\n\nTrue to form, Hashicorp released Waypoint as open source and with a lot of examples. The orchestration layer is up to you – Waypoint ships as a binary you can run right on your laptop or from whatever CI/CD orchestration tool you choose. Where you deploy is up to you as well since Waypoint shipped with support for Kubernetes, Docker, Google Cloud Run, AWS ECS, and a few others.\n\n## Benefits of Hashicorp Waypoint\n\nHashicorp Waypoint is an open-source developer workflow that can run from any laptop or CI/CD tool. Deployment is also easier because Hashicorp ships to several platforms like Kubernetes, AWS, and more. \n\nWhen using Hashicorp to build, deploy, and release applications, there are several features to keep in mind:\n\n* Waypoint provides a number of workflow examples as guides.\n\n* Build, deploy, and release your application with the single command of “waypoint up.”\n\n* Execute commands in a deployed application just as easily using “waypoint exec.”\n\n* Get a real-time look at application logs to help to debug quickly when necessary.\n\n## Orchestrating Waypoint using GitLab CI/CD\n\nUsing the fantastic [Waypoint documentation](https://www.waypointproject.io/docs) and the excellent [example applications](https://github.com/hashicorp/waypoint-examples) that HashiCorp provided, we decided to take a look at orchestrating Waypoint using [GitLab CI/CD](/topics/ci-cd/). 
To do this, we’ll start from the simple [AWS ECS Node.js app](https://github.com/hashicorp/waypoint-examples/tree/main/aws-ecs/nodejs) from the example repository.\n\nAfter cloning, we can see the structure of a Node.js application that displays a single page.\n\n![Folder structure of the Waypoint example and the page it produces](https://about.gitlab.com/images/blogimages/waypoint-example.png)\n\nYou’ll see that Dockerfile is missing from that project. There isn’t one included in the example, and we actually won’t need one because Waypoint is going to take care of that for us. Take a closer look at the `waypoint.hcl` file to see what it will do.\n\n```hcl\nproject = \"example-nodejs\"\n\napp \"example-nodejs\" {\n  labels = {\n\t\"service\" = \"example-nodejs\",\n\t\"env\" = \"dev\"\n  }\n\n  build {\n\tuse \"pack\" {}\n\tregistry {\n  \tuse \"aws-ecr\" {\n    \tregion = \"us-east-1\"\n    \trepository = \"waypoint-gitlab\"\n    \ttag = \"latest\"\n  \t}\n\t}\n  }\n\n  deploy {\n\tuse \"aws-ecs\" {\n  \tregion = \"us-east-1\"\n  \tmemory = \"512\"\n\t}\n  }\n}\n```\n\nIn the build step, Waypoint uses [Cloud Native Buildpacks (CNB)](https://buildpacks.io/) to detect the language of the project and create a Docker image without any Dockerfile. This is actually the same technology that GitLab uses as part of [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) in the Auto Build step. We’re excited to see CNB from the CNCF get more adoption by users in the industry.\n\nOnce that image is built, Waypoint will automatically push the image to our AWS ECR registry to get it ready for the deploy. Once the build has completed, the deploy step uses the [AWS ECS plugin](https://www.waypointproject.io/plugins/aws-ecs) to deploy our application to our AWS account.\n\nFrom my laptop, that’s easy. I can have Waypoint installed, be already authenticated to my AWS account, and it \"just works\". But what if I want to expand this beyond my laptop? 
And what if I want to automate this deployment as part of my overall CI/CD pipeline where all of my current unit, security, and other tests run today? That’s where GitLab CI/CD comes in!\n\n## Waypoint in GitLab CI/CD\n\nTo orchestrate all of this in GitLab CI/CD, let’s take a look at what we’ll need for our `.gitlab-ci.yml` file:\n\n1. First, we’ll need a base image to run inside of. Waypoint works on any Linux distribution and just needs Docker to run, so we can start from a generic Docker image.\n1. Next, we’ll install Waypoint to that image. In the future, we could build a [meta build image](/blog/building-build-images/) to containerize this process for us.\n1. Finally, we’ll run the Waypoint commands.\n\nAbove is all we’ll need for our pipeline to run the scripts required to get the deploy done, but we will need one more thing in order to deploy to AWS: We’ll have to authenticate to our AWS account. On [Waypoint’s roadmap](https://www.waypointproject.io/docs/roadmap), there are some mentions of plans around authentication and authorization. HashiCorp also released an exciting project in this space this week, [Boundary](https://www.boundaryproject.io/). But for now, we can handle authentication and authorization ourselves relatively simply.\n\nTo authenticate GitLab CI/CD with AWS, there are a few options. The first option is to use GitLab’s integration with [HashiCorp Vault](https://www.vaultproject.io/) if your team is already using Vault for credential management. Alternatively, if your team manages authorization through AWS IAM, you can ensure that the deploy job runs on a [GitLab runner](https://docs.gitlab.com/runner/) that is authorized to run the deployment with IAM. 
But if you’re just getting started with Waypoint and want to get going quickly, the final option is to add your AWS API Key and Secret Key as a [GitLab CI/CD variable](https://docs.gitlab.com/ee/ci/variables/) named `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.\n\n## Putting it all together with Waypoint\n\nOnce the authentication is handled, we’re ready to go! Our final `.gitlab-ci.yml` looks like this:\n\n```yml\nwaypoint:\n  image: docker:latest\n  stage: build\n  services:\n    - docker:dind\n  # Define environment variables, e.g. `WAYPOINT_VERSION: '0.1.1'`\n  variables:\n    WAYPOINT_VERSION: ''\n    WAYPOINT_SERVER_ADDR: ''\n    WAYPOINT_SERVER_TOKEN: ''\n    WAYPOINT_SERVER_TLS: '1'\n    WAYPOINT_SERVER_TLS_SKIP_VERIFY: '1'\n  script:\n    - wget -q -O /tmp/waypoint.zip https://releases.hashicorp.com/waypoint/${WAYPOINT_VERSION}/waypoint_${WAYPOINT_VERSION}_linux_amd64.zip\n    - unzip -d /usr/local/bin /tmp/waypoint.zip\n    - rm -rf /tmp/waypoint*\n    - waypoint init\n    - waypoint build\n    - waypoint deploy\n    - waypoint release\n```\n\nYou can see that we start from the generic `docker:latest` image and set up some variables required by Waypoint. In the `script` section, we grab the latest Waypoint binary and install it to our local bin. Since our runner is already authorized with AWS, it’s as simple as running `waypoint init`, `build`, `deploy`, and `release`.\n\nThe output of the build job shows us the endpoint we’re deploying to:\n\n![Folder structure of the Waypoint example and the page it produces](https://about.gitlab.com/images/blogimages/waypoint-job-output.png)\n\nWaypoint is one of multiple [HashiCorp solutions that GitLab works great with](/partners/technology-partners/hashicorp/). For example, in addition to application delivery, we could orchestrate the underlying infrastructure with [Terraform through GiLab](https://docs.gitlab.com/ee/user/infrastructure/) as well. 
To standardize security in the SDLC, we could also integrate [GitLab with Vault](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/) to manage secrets and tokens within CI/CD pipelines that provides consistency for developers and operators relying on secrets management during development testing as well as in production use.\n\nThe joint solutions developed by HashiCorp and GitLab are helping organizations find a better way for application development, and keeping delivery, and infrastructure management workflows in lock step. Waypoint is just another step in the right direction and we’re excited to see where the project goes from here. \n\n## Getting started with Hashicorp Waypoint\n\nYou can learn more about Waypoint at [waypointproject.io](https://www.waypointproject.io/). Also check out their [documentation](https://www.waypointproject.io/docs) and [roadmap](https://www.waypointproject.io/docs/roadmap) for the project. We have [contributed](https://github.com/hashicorp/waypoint/pull/492) everything we learned to the [GitLab CI/CD integration docs](https://www.waypointproject.io/docs/automating-execution/gitlab-cicd). 
You can also find a full working GitLab example in [this repository](https://gitlab.com/brendan-demo/waypoint) if you want to try it for yourself!\n",[1041,9],{"slug":8385,"featured":6,"template":686},"use-waypoint-to-deploy-with-gitlab-cicd","content:en-us:blog:use-waypoint-to-deploy-with-gitlab-cicd.yml","Use Waypoint To Deploy With Gitlab Cicd","en-us/blog/use-waypoint-to-deploy-with-gitlab-cicd.yml","en-us/blog/use-waypoint-to-deploy-with-gitlab-cicd",{"_path":8391,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8392,"content":8398,"config":8403,"_id":8405,"_type":14,"title":8406,"_source":16,"_file":8407,"_stem":8408,"_extension":19},"/en-us/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards",{"title":8393,"description":8394,"ogTitle":8393,"ogDescription":8394,"noIndex":6,"ogImage":8395,"ogUrl":8396,"ogSiteName":670,"ogType":671,"canonicalUrls":8396,"schema":8397},"Utilize the GitLab DevOps platform to avoid cloud migration hazards","The GitLab modern DevOps platform can simplify and accelerate planning, managing, moving, and modernizing applications and infrastructure as companies adopt a cloud-first posture on AWS and Google Cloud.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665811/Blog/Hero%20Images/daytime-clouds.jpg","https://about.gitlab.com/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Utilize the GitLab DevOps platform to avoid cloud migration hazards\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nima Badiey\"}],\n        \"datePublished\": \"2022-01-25\",\n      }",{"title":8393,"description":8394,"authors":8399,"heroImage":8395,"date":4964,"body":8401,"category":769,"tags":8402},[8400],"Nima Badiey","\nThese unprecedented times have been an unexpected catalyst driving companies to finally get serious about moving to the cloud. 
The adoption wave started in retail and banking by consumers who were unable to shop and bank in-person and were forced instead to drastically increase their online purchases.\n\nAs a result, many e-commerce sites hosted on public clouds experienced a Cambrian explosion of activity and business. The impact of the pandemic soon crossed every industry and segment from healthcare and education to hospitality and food services, as more and more companies closed their offices in favor of remote work. With closed buildings came closed data centers and other short-staffing of business-critical services.\n\nCoupled with supply chain disruptions of compute, networking, and storage gear, many IT teams were faced with mounting business continuity challenges, which impacted service level agreements, product quality, and ultimately customer satisfaction.\n\nThe answer to these challenges is to move applications, data, and infrastructure from on-premises to the cloud, with hosting provided by large public cloud providers like Amazon Web Services (AWS) and Google Cloud – both of which are better suited to support business-critical services. \n\nAs businesses continue to define their new processes and procedures, one condition is likely to become permanent: Cloud adoption is expected to accelerate and spread across all industries. 
[IDC FutureScape](https://www.businesswire.com/news/home/20191029005144/en/IDC-FutureScape-Outlines-the-Impact-Digital-Supremacy-Will-Have-on-Enterprise-Transformation-and-the-IT-Industry) predicts that by 2024 more than 50% of all IT spending will go toward digital transformation and cloud-first innovation projects.\n\nDespite this immutable momentum, many CIOs remain reticent as 80% are still concerned that cloud adoption initiatives alone won’t deliver the expected business agility they need, according to [a McKinsey report](https://www.mckinsey.com/business-functions/mckinsey-digital/our-insights/unlocking-business-acceleration-in-a-hybrid-cloud-world).\n\nOne reason for this is that migrating and modernizing applications simultaneously to the cloud takes more effort and experience than organizations can afford. To be successful, organizations need to adopt new software development strategies and DevOps tools to support hybrid and multi-cloud models. These teams often lack the consistent methodology and toolchains to plan, prioritize, automate, and track the progress of cloud migration projects. Adding to the risks, many companies are hampered with legacy software development workflows, disconnected processes, and siloed tools. They are further burdened with a complicated inventory of mismatched legacy hardware, aging networks, security, and application stacks that are poorly suited to cloud-native architectures.\n\nUltimately, successful cloud migrations require mastering the basics by adopting proven, repeatable, and reliable processes such as breaking big initiatives into manageable workstreams. Consistency and structured repeatability have a greater impact on project success than executive sponsorship, funding, or upgrading the company culture to an “agile” mindset. GitLab plays a critical role in the successful deployment and delivery of these cloud migration projects. 
\n\n## DevOps: The first logical step in cloud adoption\n\nGitLab is a modern DevOps platform used by startups as well as midsize and Fortune 500 companies to build and deliver software through an integrated toolset. In simple terms, it’s Git for source code management with a built-in CI/CD pipeline that includes security, code scanning, and monitoring. GitLab is an all-in-one integrated platform. No need to digitally piece multiple solutions together and no more switching between different tools and apps just to deploy software code. \n\nAs enterprises plan to migrate apps, services, data, and/or infrastructure to the cloud this year, these projects will benefit from new ways to plan, manage, and deliver value from their cloud investments.\n\nTo get started, GitLab, together with AWS and Google Cloud, has chronicled this journey with valuable guidance to help cloud teams embrace the cultural shift necessary for modern agile teams. In these guides, we map out an approach that empowers cross-functional teams to work together concurrently during migrations, refactorization, and adoption of new cloud services.\n\nWith GitLab, users can define custom assessment methodologies, create repeatable task lists for application migration, store app code and Terraform configuration scripts in Git, and set security protocols easily through simple merge requests. GitLab can also automate the process of testing, scanning, monitoring, and deploying business apps. By embracing next-gen DevOps, cloud migration projects can be more successful with proven, repeatable, and reliable processes all managed on the GitLab DevOps platform. 
\n\n### Learn more:\n- [Migration to Google Cloud and adopting cloud native](https://learn.gitlab.com/gitlab-google-cloud)\n- [Accelerate your migration to AWS using a DevOps model](https://learn.gitlab.com/gitlab-aws-microsite)\n\n",[9,1041,3232],{"slug":8404,"featured":6,"template":686},"utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards","content:en-us:blog:utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards.yml","Utilize The Gitlab Devops Platform To Avoid Cloud Migration Hazards","en-us/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards.yml","en-us/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards",{"_path":8410,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8411,"content":8416,"config":8422,"_id":8424,"_type":14,"title":8425,"_source":16,"_file":8426,"_stem":8427,"_extension":19},"/en-us/blog/value-stream-total-time-chart",{"title":8412,"description":8413,"ogTitle":8412,"ogDescription":8413,"noIndex":6,"ogImage":5439,"ogUrl":8414,"ogSiteName":670,"ogType":671,"canonicalUrls":8414,"schema":8415},"Value stream optimization with GitLab's Total Time Chart","Learn how this new analytics feature provides immediate insights about the time spent in each stage of your workstream.","https://about.gitlab.com/blog/value-stream-total-time-chart","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Value stream management: Total Time Chart simplifies top-down optimization flow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2023-06-01\",\n      }",{"title":8417,"description":8413,"authors":8418,"heroImage":5439,"date":8419,"body":8420,"category":769,"tags":8421},"Value stream management: Total Time Chart simplifies top-down optimization flow",[4146],"2023-06-01","\n\nUnderstanding where time is spent during the development lifecycle is a crucial insight for 
software leaders when optimizing the value delivery to customers. Our new Value Stream Analytics Total Time Chart is a visualization that helps managers uncover how long it actually takes to complete the development process from idea to production. Managers also can learn how much time teams spend in each stage of the workflow.\n \n![The VSA Total Time Chart displays the average time to complete each value stream stage.](https://about.gitlab.com/images/blogimages/2023-05-07-vsa-overview.gif){: .shadow}\nValue Stream Analytics Total Time Chart\n{: .note.text-center}\n\nValue Stream Analytics is available out of the box in the GitLab platform. It surfaces the process and value delivery metrics through the unified data model that stores all the records around development efforts. Value Stream Analytics uses a backend process to collect and aggregate stage-level data into [three core objects](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#how-value-stream-analytics-works):\n\n- Value streams - container objects with stage list \n- Value stream stage - an event pair of start and end events\n- Value stream stage events - the smallest building blocks of the value stream. For example, from Issue created to Issue first added to board. See the [list of available stage events](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#value-stream-stage-events).\n\n> [Register for the GitLab 16 webinar](/sixteen/), where we will unveil the latest innovations in our AI-powered DevSecOps platform.\n\nWe added in the new chart the stages breakdown as a stacked area chart to make it easier to understand how each stage contributes to the total time, and how that changes over time. Each area in the chart represents a stage. By comparing the heights of each area, you can get an idea about how each stage contributes to the total time of the value stream. 
We also added a tool tip with the stages breakdown sorted top to bottom, to help you understand the stages in their correct order.\n\nThe new chart is available in the Value Stream Analytics Overview page (on the left sidebar, select **Analytics > Value stream**). This page includes four sections:\n  1.  Data filter text box - on the top of the Overview page you can use the [Data filters](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#data-filters) to view data that matches specific criteria or date range. \n  2. Stage navigation bar - below the filter text box you can use the the stage navigation bar to investigate what happened in the specific stage and to identify the items (issues/MRs) that are slowing down the stage time.\n  3. Key metrics tiles - the summary of the stream performance is displayed, above the chart in the [Key metrics tiles](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#key-metrics). \n  4. Overview charts - the newly added Total Time Chart and the [Task by type](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#view-tasks-by-type) chart. \n\nBut that's not all. The Total Time Chart also simplifies the top-down optimization flow, starting from the Value Streams Dashboard organization-level view to a drill-down into the performance of each project:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/EA9Sbks27g4\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n\nFrom the Value Stream Analytics overview page, you can drill down from Key metrics tiles into other GitLab analytics pages for deeper investigations. You can also go up to the Value Streams Dashboard, or investigate the [DORA metrics](/solutions/value-stream-management/dora/) that are also available in the new dashboard.\n\nIt's important to note that the chart data is limited to items completed within the selected date range. 
Also, there could be points in time with no [\"stage event\"](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#value-stream-stage-events) actions. In these cases, the chart will display a dashed line to represent the missing data. These gaps can add contextual information about the workstream, and usually do not represent interruptions in the data. When there is \"no data\" for a specific stage, the stage line will be flat.\n\nTo learn more check out the [Value Stream Analytics documentation](https://docs.gitlab.com/ee/user/group/value_stream_analytics/).\n\nWith the Value Stream Analytics Total Time Chart, you get immediate insights about the time spent in each stage over time to determine if progress is being made. Try it out today and see the difference it can make in your workstream!\n",[855,9,916,1040,683],{"slug":8423,"featured":6,"template":686},"value-stream-total-time-chart","content:en-us:blog:value-stream-total-time-chart.yml","Value Stream Total Time Chart","en-us/blog/value-stream-total-time-chart.yml","en-us/blog/value-stream-total-time-chart",{"_path":8429,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8430,"content":8436,"config":8440,"_id":8442,"_type":14,"title":8443,"_source":16,"_file":8444,"_stem":8445,"_extension":19},"/en-us/blog/velocity-with-confidence",{"title":8431,"description":8432,"ogTitle":8431,"ogDescription":8432,"noIndex":6,"ogImage":8433,"ogUrl":8434,"ogSiteName":670,"ogType":671,"canonicalUrls":8434,"schema":8435},"How GitLab 14 satisfies the need for speed with modern DevOps","GitLab 14: Ship with velocity, ship with confidence","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682089/Blog/Hero%20Images/racecar_devops.jpg","https://about.gitlab.com/blog/velocity-with-confidence","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab 14 satisfies the need for speed with modern DevOps\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Parker Ennis\"}],\n        \"datePublished\": \"2021-07-29\",\n      }",{"title":8431,"description":8432,"authors":8437,"heroImage":8433,"date":4735,"body":8438,"category":791,"tags":8439},[2959],"\n\n## How DevOps and NFS changed the game\n\nWhat if I told you that one of the best-selling racing video game franchises of all time, the \"Need For Speed\" (NFS), and DevOps have more in common with each other than you think? Yes, you read that correctly, probably not the NFS (Network File System) you were expecting.\n\n### An appetite for change\n\nFor context, the NFS series originally set out to redefine a saturated, yet unsophisticated, racing video game market. Motivated by an appetite for change, the NFS user experience reflected the human connection to real cars and how they behaved, which was a big challenge for developers in the 1990s. Nearly 30 years ago, \"The Need for Speed\" forever changed the landscape of racing games, selling 150 million copies since its debut.\n\n![The original Need For Speed game from 1994](https://about.gitlab.com/images/blogimages/need_for_speed.png){: .shadow.center}\nThe original Need For Speed video game set a new standard with an appetite for industry change.\n{: .note.text-center}\n\nCoincidentally, it was in 1994 that Grady Booch coined the term \"continuous integration\" (CI). Booch, like NFS, paved the way for immense industry growth in the realm of software development. 
CI aimed to redefine the manual, time-consuming development processes that paid little mind to how real humans and developers behaved and collaborated around application development by [leveraging automation to increase development speed without sacrificing quality](/topics/ci-cd/benefits-continuous-integration/).\n\nSimilar to how NFS took the racing scene by storm and laid the groundwork for the racing game genre, CI evolved into what is arguably the most important piece of DevOps best practices today: Continuous integration and continuous delivery (CI/CD).\n\nDevOps continues to evolve, but without CI/CD, DevOps isn't the collaborative practice that helps teams work faster and more efficiently. CI/CD is a super power within DevOps – unlocking the potential to ship apps with increased velocity and confidence in their quality, without having to choose one or the other.\n\n### DIY DevOps vs Modern DevOps\n\nToday, it doesn't matter what your business does, it's going to involve some amount of using and building software. DevOps gained traction in the age of digital transformation, where the rate of technical innovation acted as a forcing function for companies to fail or survive. Over the past 10 years or so, organizations had a choice to either embrace this \"need for speed\" and adopt DevOps practices, or be displaced by their competition.\n\nThis scramble led to a \"DIY\" style of DevOps that couldn't deliver on its promises much of the time. For many organizations, the biggest problem wasn't just the brittle toolchains composed of disparate pieces of software but also trying to make these complicated toolchains and processes benefit from DevOps. Since uprooting everything wasn't an option, the root of the problem was still there, and DevOps was hard to adopt.\n\nFor all the teams DevOps has helped, the DevOps marketplace must continuously improve and evolve as we learn more about the challenges of modernizing workflows. 
DevOps must modernize alongside businesses to ensure it's an accessible and realistic framework for as many companies as possible to leverage.\n\n### GitLab 14 fuels the modern DevOps need for speed\n\nWith a platform-driven approach, [GitLab 14](/releases/2021/06/22/gitlab-14-0-released/) delivers a consistent and efficient developer and operator experience that leads to a simplified and more predictable SDLC. A single user interface, embedded security, and a unified data store are just some of the features of a platform any company can use without the tradeoffs of the DIY DevOps past. By using one tool for source code management, CI, and CD, teams are more efficient and productive with streamlined collaboration. Engineers are happier when focused on value-add than when maintaining integrations – and happy developers help attract and retain talent.\n\n[GitLab 14](/gitlab-14/) ushers in a new era of modern DevOps as a global movement, and I'm excited to talk a little bit about some of its capabilities that help you ship software faster, with a higher degree of confidence, and improve your ability to respond to market changes.\n\n### Ship with velocity and confidence\n\n**1. [GitLab pipeline editor](/releases/2021/01/22/gitlab-13-8-released/#pipeline-editor)**\n\nCrafting pipelines can be complicated and verbose without an understanding of advanced pipeline syntax and how it fits within the workflow using the '.gitlab-ci.yml' configuration file. Needing to craft pipelines from scratch presents a steeper learning curve for organizations and teams with a less mature DevOps culture. The GitLab pipeline editor lowers the barrier to entry for CI/CD novices and accelerates power users with visual authoring and versioning, continuous validation, and pipeline visualization. 
Whether you're a more advanced user or novice, the pipeline editor unlocks additional power and usability.\n\n![Pipeline editor linting capability makes pipeline authoring easier](https://about.gitlab.com/images/blogimages/lint_ci.png){: .shadow.center}\nPipeline editor linting capability makes pipeline authoring easier and more efficient.\n{: .note.text-center}\n\nHere's what some of our wider community is saying about the pipeline editor:\n\n> \"I really like the direction of making CI/CD more accessible to first-time users and how GitLab rolls out this feature piece by piece.\" - Bernhard Knasmüller, computer scientist\n\n> \"This is going to improve the CI/CD configuration experience greatly!\" - Olivier Jourdan, developer\n\n**2. [GitLab Agent for Kubernetes](https://youtu.be/17O_ARVaRGo)**\n\nThe GitLab Agent for Kubernetes enables secure, cloud-native [GitOps](/solutions/gitops/). GitLab also meets customers where they are by supporting GitOps with agent-based and agentless approaches, and for deployments anywhere, regardless of whether infrastructure is cloud-native. It also enables alerts based on network policies for pull-based deployments.\n\nHere's piece of feedback from the wider GitLab community on the Kubernetes Agent:\n\n> \"GitLab is leading the evolution of DevOps by optimising work efficiency and cloud-native integration capabilities. This enables the rapid delivery of digital value.\" - Vasanth Kandaswamy, Head of Data and Applications Portfolio, Fujitsu Australia\n\nWe look forward to iterating and improving these capabilities and always [welcome your feedback](/submit-feedback/#product-feedback) on our product.\n\n### What's next?\n\nOne thing is for sure: **people want to go fast,** but not when it requires sacrificing peace of mind and quality. 
We're committed to helping you ship with velocity and confidence by [investing in specific product areas](/direction/#fy22-product-investment-themes) to bring the benefits of modern DevOps to anyone using GitLab to deliver their applications.\n\n![Go fast with confidence](https://about.gitlab.com/images/blogimages/gofast.gif){: .shadow.center}\nEven Ricky Bobby from Talledega Nights agrees. People just want to go fast!\n{: .note.text-center}\n\nWe'll continue executing on our [vision for CI/CD](https://gitlab.com/groups/gitlab-org/-/epics/4534) to create a visual pipeline authoring experience built right into GitLab that simplifies the complexity, letting you quickly create and edit pipelines while still exposing advanced options when you need them.\n\nWe're also committed to making sure you can deploy anytime and anywhere to take advantage of the benefits of Kubernetes, no matter where you are at on your cloud native development journey. If you have feedback or suggestions on what we can do better, please [let us know in our product epic.](https://gitlab.com/groups/gitlab-org/-/epics/3329)\n\nWe look forward to delivering you more value as we iterate upon this new era of GitLab 14 going foward and can't wait to see the great things you're creating with Gitlab.\n\n_This blog is part three in a three-part series on the top capabilities of GitLab 14. 
Learn more about [how GitLab 14 prepares you for DevSecOps 2.0 in part one](/blog/are-you-ready-for-the-newest-era-of-devsecops/), and about [how to optimize DevOps with GitLab 14's enhanced visibility tools in part two](/blog/optimizing-devops-visibility-in-gitlab-14/)._\n\nCover image by [CHUTTERSNAP](https://unsplash.com/@chuttersnapk) on [Unsplash](https://unsplash.com/photos/5Yo1P9ErikM)\n{: .note}\n",[9,728,976,977,534],{"slug":8441,"featured":6,"template":686},"velocity-with-confidence","content:en-us:blog:velocity-with-confidence.yml","Velocity With Confidence","en-us/blog/velocity-with-confidence.yml","en-us/blog/velocity-with-confidence",{"_path":8447,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8448,"content":8454,"config":8461,"_id":8463,"_type":14,"title":8464,"_source":16,"_file":8465,"_stem":8466,"_extension":19},"/en-us/blog/verizon-customer-story",{"title":8449,"description":8450,"ogTitle":8449,"ogDescription":8450,"noIndex":6,"ogImage":8451,"ogUrl":8452,"ogSiteName":670,"ogType":671,"canonicalUrls":8452,"schema":8453},"Verizon cuts datacenter rebuilds from 30 days to 8 hours","Verizon utilized microservices, automation, and GitLab to reduce datacenter rebuilds to under 8 hours.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678933/Blog/Hero%20Images/verizon_video_blog.jpg","https://about.gitlab.com/blog/verizon-customer-story","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Verizon Connect reduced datacenter rebuilds from 30 days to under 8 hours with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kim Lock\"}],\n        \"datePublished\": \"2019-02-14\",\n      }",{"title":8455,"description":8450,"authors":8456,"heroImage":8451,"date":8458,"body":8459,"category":1318,"tags":8460},"How Verizon Connect reduced datacenter rebuilds from 30 days to under 8 hours with GitLab",[8457],"Kim Lock","2019-02-14","\nIn 2016, the 
[Verizon Connect](https://www.verizonconnect.com/) Telematics Container Cloud Platform team was struggling with data center\nbuilds that took 30 days. Working with legacy systems that included Java-based, monolithic\napplications, they also had a variety of disparate tools including BitBucket, Jenkins, and Jira\nin use throughout their environment.\n\n### Starting from scratch to move to microservices and increase automation\n\nThe group looked to move to a [microservices architecture](/blog/strategies-microservices-architecture/) to improve deploy speed and increase\nautomation. They also wanted to overcome manual errors, disjointed processes, and\nmanual deploys. \"We were just spending too much time doing stuff manually, so we decided\nto just start fresh and write everything from scratch,\" says Mohammed Mehdi, Principal DevOps, Verizon.\n\nAs they created this new infrastructure, they kept four key components in mind: architecture,\nautomation, extensibility, and being proactive and prepared for the future. They wanted to rebuild\ntheir data centers in less than 12 hours, instead of 30 days. They had a goal of 100 percent CI/CD.\nThey wanted to remove manual deployments, especially around the server and network deployments.\nThe team also focused on avoiding vendor lock-in by seeking open source tools to help them accomplish these goals.\n\nThe team looked to improve automation by focusing on simplification, standardization, and providing end-to-end visibility.\n\"We wanted easily repeatable, with zero-touch, zero-downtime deployments, automated tracking,\" Mehdi explains.\n\n### A single solution to meet their needs\n\nThe team chose GitLab to support this infrastructure initiative because it met a number of their qualifications, including being open source and offering Windows support. 
The team liked that it is easy to use and the UI easy to understand.\n\n\"Some of the other features that we really loved, and we didn’t find with any other CI/CD tool, are the project management\nfeatures,\" Mehdi says. \"GitLab replaced a bunch of disparate systems for us like Jira, BitBucket, and Jenkins. GitLab\nprovided us with a one-stop solution.\"\n\nThe Verizon Connect Telematics Container Cloud Platform team is using GitLab for:\n\n- [Code review](/blog/demo-mastering-code-review-with-gitlab/)\n- [CI/CD](/solutions/continuous-integration/)\n- [Issue tracking](/pricing/feature-comparison/)\n- [Source Code Management](/solutions/source-code-management/)\n- [Audit Management](https://docs.gitlab.com/ee/administration/audit_events.html)\n- [ChatOps](https://docs.gitlab.com/ee/ci/chatops/)\n\nThe team has successfully achieved deployment flexibility and are platform agnostic. They now have\nstreamlined processes and developers can truly focus on differentiating tasks.\n\nThe team was able to reduce their complete datacenter deploy\nprocess to under eight hours because of the streamlined deploy and build processes\nthey enabled using GitLab. 
Learn how [Verizon Connect](https://www.verizonconnect.com/) is achieving this success by watching\nmore about their story and how they achieved their targets in [the YouTube video](https://youtu.be/zxMFaw5j6Zs) below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/zxMFaw5j6Zs\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThanks for giving GitLab a shot, Verizon Connect!\n\nCover image by [chuttersnap](https://unsplash.com/@chuttersnap) on [Unsplash](https://unsplash.com)\n{: .note}\n",[793,109,9,1829,683],{"slug":8462,"featured":6,"template":686},"verizon-customer-story","content:en-us:blog:verizon-customer-story.yml","Verizon Customer Story","en-us/blog/verizon-customer-story.yml","en-us/blog/verizon-customer-story",{"_path":8468,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8469,"content":8474,"config":8479,"_id":8481,"_type":14,"title":8482,"_source":16,"_file":8483,"_stem":8484,"_extension":19},"/en-us/blog/vscode-workflow-new-features",{"title":8470,"description":8471,"ogTitle":8470,"ogDescription":8471,"noIndex":6,"ogImage":4938,"ogUrl":8472,"ogSiteName":670,"ogType":671,"canonicalUrls":8472,"schema":8473},"Four new tools for your Visual Studio Code and GitLab tool belt","Learn about new features that can help you review MRs and interact with GitLab","https://about.gitlab.com/blog/vscode-workflow-new-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Four new tools for your Visual Studio Code and GitLab tool belt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomas Vik\"}],\n        \"datePublished\": \"2021-11-17\",\n      
}",{"title":8470,"description":8471,"authors":8475,"heroImage":4938,"date":7918,"body":8477,"category":791,"tags":8478},[8476],"Tomas Vik","\n\nIn our [previous post](/blog/mr-reviews-with-vs-code/), we talked about merge request (MR) Reviews. We explained how the GitLab Workflow extension helps you review MRs without leaving VS Code. Since releasing and polishing the MR reviews, we've been working on improvements to the extension. In this post, we will show you how the latest features fit into your workflow.\n\n### Do you have a lot to say? Use a snippet patch!\n\nOn GitLab's web UI there's the \"suggestions\" feature. It's handy for suggesting small changes in the MR review. The VS Code platform doesn't let us recreate the same experience, but the extension offers an alternative: Snippet patches.\n\nSnippet patches are code changes (git patches) of arbitrary size shared as GitLab snippets. Because they don't have a size limit, they are perfect for suggesting changes to multiple files during the MR review.\n\nThe extension has two commands, `Create snippet patch` and `Apply snippet patch`. These commands use `git diff` and `git apply`, respectively, which means people can still apply the snippet patch even if they don't use the GitLab Workflow extension.\n\nIf a suggestion in the comment is a hammer, then a snippet patch is a pneumatic tamping machine. Next time you'll review an MR, and you see a lot of space for improvement, remember the adage: \"A patch is worth a thousand words\".\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/QQxpLoKJULQ\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n### What's going on with my pipeline? - Improved CI status display\n\nThe extension always showed the latest CI pipeline status in both the status bar and the sidebar. 
However, if you tried to gauge your pipeline status, you probably run into one or more surprises. The status was hard to understand. Sometimes it related to a different branch, or it was out of date.\n\nWe've made the pipeline status much more reliable and readable. For starters, you can now see individual jobs and their status in the sidebar. Click on any job, and the extension opens a browser window with the GitLab job page.\n\nWe also improved the consistency of showing the pipeline status. The status bar and sidebar are now in sync and always showing pipeline for the current branch.\n\nWe are excited about the cleaner code. It makes it easier for anyone to contribute functionality. If you'd be interested in giving it a shot, we recommend starting with the [Download artifacts from the latest pipeline](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/106) feature request. \n\n\n![VS Code status bar](https://about.gitlab.com/images/blogimages/2021-11-05-vscode-workflow-new-features/ci-pipeline-panel.png){: .shadow.medium.center}\nVS Code CI Pipeline status overview from GitLab extension.\n{: .note .text-center}\n\n### Make the MR your own - Working with checked out code\n\nTwo recent improvements play well together to make your review more interactive. They help you spend less time on actions that don't directly relate to reviewing code. These improvements let you check out the MR branch and open a local file during a review.\n\n#### Check out the MR branch\n\nYou can checkout any MR locally, as long as it is not coming from a forked project. Right-click the MR in the side tree and select \"Checkout MR Branch\". After the command finishes, you'll have the MR branch checked out in your project. 
Now you can review and run the code.\n\n\u003Cfigure class=\"video_container\">\n  \u003Cvideo src=\"https://gitlab.com/gitlab-org/gitlab-vscode-extension/uploads/db804234ed4d338dea31a27778dba72e/checkout-mr-branch.mp4\" controls=\"true\" data-setup=\"{}\" data-title=\"checkout-mr-branch\" preload=\"metadata\" width=\"560\">\u003C/video>\n\u003C/figure>\n{: .shadow.medium.center}\n\n#### Open a local file during a review\n\nWhen you look at a changed file in an MR, you can click on a small \"file\" icon in the top-right corner. The extension will open the same file in your local repository.\n\nIf your local branch is different from the MR branch, the local file might not be the same as the MR file.\n\nOpening the local file is useful when you want to explore the surroundings of the file quickly. The VS Code automatically focuses the file in the file tree, which lets you see all the neighbouring files.\n\n\u003Cfigure class=\"video_container\">\n  \u003Cvideo src=\"https://gitlab.com/gitlab-org/gitlab-vscode-extension/uploads/de2839b1ceb1be6c33cd80d7fe72bc6d/open-mr-file.mp4\" controls=\"true\" data-setup=\"{}\" data-title=\"open-mr-file\" preload=\"metadata\" width=\"560\">\u003C/video>\n\u003C/figure>\n{: .shadow.medium.center}\n\n### Commitment problems? Browse repositories without checking them out\n\nAt GitLab, we've got some large repositories. The largest, which all GitLabbers use daily, is [www-gitlab-com](https://gitlab.com/gitlab-com/www-gitlab-com), the website you see when you visit `about.gitlab.com`. This 6 GB colossus takes several minutes to check out.\n\nExploring this repository is a perfect use case for our latest feature: Remote Repositories, [contributed by Ethan Reesor](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/321), a community member.\n\nRun the `GitLab: Open Remote Repository` command, pick which project and branch you want to use, and _voilà_.  
The extension opens the repository in your local workspace, but it doesn't store data on your local machine.\n\nRemote repositories are useful when you want to browse a repository for a reference but don't plan to change the code.\n\nThis is the first iteration, and it's got some limitations - you can't use full-text search, fuzzy file navigation, and the files are read-only. It's useful nonetheless.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/p4GTVx_Nd2s\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n### Thank you community!\n\nMost of the features introduced in this post are either implemented or suggested by a community member. Ahmed Mohamadeen [suggested](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/390) opening local file during MR review, Musisimaru [created initial implementation](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/203) of checking out MR branch, and Ethan Reesor [implemented](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/321) remote repositories.\n\n\nIf you'd like to shape the future of the GitLab Workflow VS Code extension, you can create issues in [our issue tracker](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues), or look for [issues where we accept MRs](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues?label_name%5B%5D=Accepting+merge+requests). 
Our [CONTRIBUTING](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/blob/main/CONTRIBUTING.md) guide is an excellent place to start.\n\nCover image by [Ljubica Petkovic](https://ljubicapetkovic.com), licensed under [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/)\n{: .note}\n",[231,978,9],{"slug":8480,"featured":6,"template":686},"vscode-workflow-new-features","content:en-us:blog:vscode-workflow-new-features.yml","Vscode Workflow New Features","en-us/blog/vscode-workflow-new-features.yml","en-us/blog/vscode-workflow-new-features",{"_path":8486,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8487,"content":8493,"config":8498,"_id":8500,"_type":14,"title":8501,"_source":16,"_file":8502,"_stem":8503,"_extension":19},"/en-us/blog/wag-labs-blog-post",{"title":8488,"description":8489,"ogTitle":8488,"ogDescription":8489,"noIndex":6,"ogImage":8490,"ogUrl":8491,"ogSiteName":670,"ogType":671,"canonicalUrls":8491,"schema":8492},"How Wag! cut their release process from 40 minutes to just 6","The popular dog-walking app is rolling out new features faster and with more confidence as they adopt GitLab for more of their DevOps workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678923/Blog/Hero%20Images/dog-walking.jpg","https://about.gitlab.com/blog/wag-labs-blog-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Wag! cut their release process from 40 minutes to just 6\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2019-01-16\",\n      }",{"title":8488,"description":8489,"authors":8494,"heroImage":8490,"date":8495,"body":8496,"category":1318,"tags":8497},[3485],"2019-01-16","\nDo you own a dog and work outside of the home? If you do, or even just know someone who does, you know that finding a trustworthy caretaker is of the utmost importance. 
With dog walkers in cities and towns across the U.S., the folks at [Wag!](https://wagwalking.com/about) have proven to be a source of reliable caretakers for countless fur parents. In three years, the company has powered more than one billion walks via its app for on-demand dog walking, sitting, and boarding, that boasts of millions of users.\n\nWag! recently signed on with GitLab to make the most of their engineering hours and bring their customers new features and updates at a faster clip.\n\n### From version control, to CI, to the full pipeline\n\nHaving previously used GitLab as their main source of truth for repositories, Wag! initially planned to return to the app solely for [continuous integration (CI)](/solutions/continuous-integration/). But after giving it a whirl, they quickly expanded their strategy to include the use of other features.\n\n\"We started our GitLab project about seven or eight months ago,\" explains [Dave Bullock](https://www.linkedin.com/in/eecue), director of engineering at Wag! \"The original idea was to just use it as our CI platform. But as we built that out, we started using it for more and more tasks, and ended up using it for our full [CI/CD pipeline](/topics/ci-cd/). That includes both our application, so the CI/CD that powers the API, along with our infrastructure. We use GitLab with Terraform to test, review, save, and deploy all of our infrastructure as well as the application on two separate pipelines. Every team uses it in their application, whether it's the Android application, the web application, the API, or our infrastructure; it's all being tested, built, and deployed through GitLab.\"\n\n### Streamlining to a single application\n\nPart of GitLab's appeal stemmed from the [ability to do everything in one place](/topics/single-application/). Wag! 
was searching for an [integrated solution](/solutions/continuous-integration/) that would streamline their development process, and they found it in GitLab.\n\n\"We were previously using a combination of Travis and other random technologies, and we just wanted something with a little bit better interface, a little more control, and something that we owned as far as the hosting and the management,\" says Bullock. \"We really wanted to move towards a single, full-service application.\"\n\n>\"We just wanted something with a better interface, a little more control, and something that we owned as far as the hosting and the management. We really wanted to move towards a single, full-service application.\"\n\nThe impact of that choice is also being felt on the infrastructure side. Wag!'s infrastructure engineers no longer have to manually stage and test their work. They are now following the same basic workflow that is used for their app, while integrating Terraform to manage their infrastructure.\n\n\"Basically, one of our DevOps team members will make a change, cut a pull request, and it'll be reviewed by the team. If it looks good, we'll say, 'Okay, cool. Merge it into master,'\" Bullock explains. \"If it's one of the modules, we'll tag that module, update the reference to it, and then the CI pipeline will kick off. It'll test the syntax, look for any security issues, and alert a Slack channel if there are any. It'll then stage a full version of the environment and test it. So, it stages all the pieces: the database, cache, and everything else, and tests it all to make sure that it works, just like we would be testing our production website.\n\n\"If that passes, then it allows you to see what your changes are going to do before you apply them,\" he continues. \"We call it Terraform plan. So, it runs Terraform plan on each piece of our infrastructure, and it'll tell us something like, 'Hey, we see 34 changes and 2 destructions and 1 creation in this environment. 
Click here to review.' Then the group will review it and if it looks good, we'll apply it in production. Having that as a full pipeline is really great.\"\n\n>“Now it's so easy to deploy something and roll it back if there's an issue. It's taken the stress and the fear out of deploying into production.” – Dave Bullock, Director of Engineering\n\n### Easy learning curve\n\nSome of the Wag! engineers had working experience with GitLab, while others had not. Nonetheless, Bullock found the onboarding of his teams to be a fairly easy process due to the intuitive nature of the interface.\n\n\"I think once you kind of understand how CI works, it's basically about following things step by step,\" he says. \"Pipelines were a new concept to a lot of the team, but once you see it happening visually, it's really easy to understand what's going on, expand and add to it. It's a really useful interface. Seeing all those green dots or red dots makes it really clear what's going on.\"\n\n### Built-in security, shaving down test times and faster releases\n\nAs part of their ramp up in GitLab, the dog-walking service recently furled [automated security scanning and license management](/solutions/security-compliance/) into their workflow, with Bullock noting how \"great\" it is to have those features baked into the pipeline so that immediate action can be taken when needed.\n\nWag! currently issues three releases a day, with plans to bump that number up to eight or more. Since adopting GitLab, they have seen a massive improvement in the amount of time spent on the release process. **What previously took 40 minutes to an hour to accomplish, now takes just six minutes.**\n\n\"Traditionally, the release process was slow, fragile, and limited to only a few key release engineers who had access to 10 different systems to monitor, make changes, and log into to make updates and pull in the latest code. It was not optimal. Now it's literally a single pane of glass. 
A lot of it just happens automatically when you merge `develop` into `master` and tag it.\"\n\nThe release process time should improve even more once Wag! engineers switch from manually pushing parts of the release through to automating the process.\n\n\"Right now, we're still clicking through the interface and saying, 'Okay, do this, now let's monitor,'\" says Bullock. \"But I think as we become more comfortable with it, we'll go to fully automated deployments. Literally, just let it go and deploy. If we see an uptick in errors, we'll let it roll back on its own. But as it is now, it's so easy to deploy something and roll it back ourselves if there's an issue. It's taken the stress and the fear out of deploying into production.\"\n\n### Adopting DevOps\n\nWag!'s engineering team has big plans for 2019. They are currently in the process of moving their repositories from GitHub to GitLab and are planning to switch from Amazon ECS to [Kubernetes](/solutions/kubernetes/). This is all part of their roadmap to implementing DevOps.\n\n\"I think we're going to start working on the project in Q1 and it will be really awesome to have all the bells and functionality,\" Bullock says. \"We're excited about Auto DevOps and a lot of new things GitLab has coming down the pipeline. We're going to push pretty hard on that this year.\n\n\"I'm a big fan of DevOps in general, so I think the closer that you can bring the development engineers to the ops side, the better things work,\" he adds. \"I would love for every software engineer or backend engineer to take ownership of the environment that their code runs in, or at least be able to experiment with it and kind of instantly just spin up a full working environment that is the same as our production environment, which we do now, but not with Kubernetes. I think removing that friction is great.\"\n\n### Growing with GitLab\n\nGitLab's releases are a treat the folks at Wag! look forward to checking out each month. 
The rollout of new features, which are partly determined by user feedback, tend to correlate with the engineering needs of the growing dog-walking and boarding service.\n\n\"I think it's exciting that as we're growing and adding interesting pieces to our infrastructure and application, we're seeing GitLab grow with your monthly release cycles,\" says Bullock. \"Every month there's some new stuff that we're like, 'Oh cool, we could use that, that's perfect.' It's nice to have GitLab as a partner that's growing with us, and it's exciting to see the parallels of new features that you're launching and how it's solving our problems and optimizing things. There's all kinds of cool stuff, and every time we start using a new piece of GitLab, I feel like, 'Okay, that's great, we're really getting our money’s worth.'\"\n\nPhoto by [Andrii Podilnyk](https://unsplash.com/photos/dWSl8REfpoQ?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/dog-walk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[793,976,749,9,875,1158,1829,683],{"slug":8499,"featured":6,"template":686},"wag-labs-blog-post","content:en-us:blog:wag-labs-blog-post.yml","Wag Labs Blog Post","en-us/blog/wag-labs-blog-post.yml","en-us/blog/wag-labs-blog-post",{"_path":8505,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8506,"content":8512,"config":8517,"_id":8519,"_type":14,"title":8520,"_source":16,"_file":8521,"_stem":8522,"_extension":19},"/en-us/blog/want-a-better-devops-career-learn-the-business",{"title":8507,"description":8508,"ogTitle":8507,"ogDescription":8508,"noIndex":6,"ogImage":8509,"ogUrl":8510,"ogSiteName":670,"ogType":671,"canonicalUrls":8510,"schema":8511},"Want a better DevOps career? Learn the business","A better DevOps career starts with a thorough understanding of business. 
Here's how to get started.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669715/Blog/Hero%20Images/synchronous-collaboration-as-a-remote-designer.jpg","https://about.gitlab.com/blog/want-a-better-devops-career-learn-the-business","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Want a better DevOps career? Learn the business\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Johanna Ambrosio\"}],\n        \"datePublished\": \"2022-03-17\",\n      }",{"title":8507,"description":8508,"authors":8513,"heroImage":8509,"date":8514,"body":8515,"category":679,"tags":8516},[4963],"2022-03-17","\nIf it’s time to add to your skill set and improve your DevOps career, a new programming language is always a good choice, but a fundamental understanding of your company’s business might be better. \n\nSpending time to understand the “business side” isn’t just a nice-to-have – it can literally be the difference between remaining an individual contributor or moving into management. It’s so important that in our 2021 Global DevSecOps Survey, respondents ranked “subject matter expertise” as one of the top skills they’d need for their future DevOps careers. \n\nIf you plan to stay a pure technologist and don’t want to manage anyone else or engage in strategy development, you can stop reading now. But if you want to jumpstart your DevOps career, be prepared to put in a couple of hours each week on the following six areas of subject matter expertise. (This is all while [staying current with your tech skills](/blog/the-top-skills-you-need-to-get-your-devops-dream-job/), of course.) Enlist your HR department, your manager, and your mentor(s) for information and start adding to your DevOps career right away. \n\n**Find out all you can about your company.** Yes, you probably got a bit of this when you first started working there, but you could likely use a deeper dive or a refresher. 
If your company has a knowledge-sharing wiki or library that includes materials about the company’s history and background, make that your go-to. Do a web search. Really explore your company’s website. What’s on the home page? What are the major sections of the site, and what’s being promoted and/or explained to your company’s customers? (And do re-check from time to time; this isn’t a one-and-done process.) \n\nIf your company started out doing X and shifted to Y, when did that happen, and why? (If you’re on Slack or another company-wide communication platform, those can be great places to ask about the past and course corrections.). Soak up any history and as much of the culture as possible. \nLearn about the business you’re in. If your company manufactures widgets, become better-versed in the fundamentals of widget manufacturing. The web is your friend here; you can learn tons for free. Here are some questions to ask:\n\n- Does the company make or create everything it sells, or does it partner with others? \n- How does the manufacturing process work? \n- Where are the plants? \n- Is the company hitting snags these days because of shipping problems or shortages of parts? What’s it doing to address these? \n- What are the major trends affecting the business now, and what’s projected for the next couple of years? \n\n**Search for analyst reports about the industry you’re in.** And even if you can’t get the full reports without paying for them, you can soak up enough from the key takeaways or executive summaries to understand the most important trends. Find out which key publications – online or paper – your management reads to keep up with the industry. 
Subscribe, or at least read them from time to time.\n\n**Do some competitive research.** You don’t need to create a hugely detailed competitive analysis, of course, but know your firm’s major business rivals –- who they are, what sets them apart from each other, and what differentiates your own company from the rest of the pack. Your marketing department likely already has this document.\n\n**Absorb all you can about your company’s external customers.** Who are they and what products and services do they buy from your firm? If your company’s done focus groups, or surveys, or anything to do with finding out about customer preferences, read through at least the executive summaries to get the big picture. Again, the marketing department will probably have materials you can read.\n\n**Acquire essential business know-how.** Basic [communication skills](/blog/soft-skills-are-the-key-to-your-devops-career-advancement/) – both oral and written – are key to doing pretty much anything on the job, no matter your role or seniority. It’s essential to be both concise and clear, and those are learned aptitudes, not bestowed at birth. As you progress in your career, you’ll need to be able to communicate with internal customers and make presentations to managers and others. \n\n**Seek out leadership, problem-solving, and negotiation skills to improve how you work with others.** Those skills will also help you get to consensus in meetings as quickly as possible. Basic financial management is also key ([Coursera courses, books, or a community college](https://www.coursera.org/learn/finance-for-non-financial-managers)are good options); you’ll want to learn how to shepherd tech projects that come in at or under budget and understanding some level of finance will save you when talking to higher-ups who are all about the bottom line. \nPractice (or learn) time management skills. Yes, you depend on others for pieces of the projects you work on. 
But you should learn to use your own time most effectively and not be *The Person Who Holds Everything Up* or is hopelessly disorganized anytime someone asks you a question. This will also help you juggle multiple projects without crashing and burning or having to work 12-hour days. Bonus: These techniques can be very helpful in your personal life also.\n\nYour DevOps career goal with learning all of this is to develop the knowledge and tools you need to think broadly about how tech can solve problems, make or save money, create new products and services, and delight customers. \n\nThe more you know about your company, your customers, and the business you’re in, the more you’ll be able to combine that knowledge with your tech smarts. Yours might be the next game-changer idea that results in your promotion or a nice, fat bonus. The sky’s the limit.\n\n\n",[9,813,749],{"slug":8518,"featured":6,"template":686},"want-a-better-devops-career-learn-the-business","content:en-us:blog:want-a-better-devops-career-learn-the-business.yml","Want A Better Devops Career Learn The Business","en-us/blog/want-a-better-devops-career-learn-the-business.yml","en-us/blog/want-a-better-devops-career-learn-the-business",{"_path":8524,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8525,"content":8530,"config":8535,"_id":8537,"_type":14,"title":8538,"_source":16,"_file":8539,"_stem":8540,"_extension":19},"/en-us/blog/want-faster-releases-your-answer-lies-in-automated-software-testing",{"title":8526,"description":8527,"ogTitle":8526,"ogDescription":8527,"noIndex":6,"ogImage":928,"ogUrl":8528,"ogSiteName":670,"ogType":671,"canonicalUrls":8528,"schema":8529},"Want faster releases? Your answer lies in automated software testing","The trouble with testing? Nearly everything! 
Here's why automated software testing is so hard to get right, and how a DevOps platform can help.","https://about.gitlab.com/blog/want-faster-releases-your-answer-lies-in-automated-software-testing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Want faster releases? Your answer lies in automated software testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-09-30\",\n      }",{"title":8526,"description":8527,"authors":8531,"heroImage":928,"date":8532,"body":8533,"category":769,"tags":8534},[851],"2021-09-30","\n\nFor three years in a row, our Global DevSecOps Survey found testing was the number one reason (by large margins) for release delays. A lack of automated software testing, combined with too many manual tests conducted too late in the process, was a story told time after time, and it certainly was one without any kind of happy ending.\n\nDespite the undeniable progress DevOps has brought to software development, integrating automated software testing into the lifecycle has remained an elusive goal for many teams. Here’s a look at why testing is such a difficult step to get right, and how an integrated DevOps Platform can bring much-needed structure to the process.\n\n## The state of automated software testing\n\nAccording to our [2021 Survey](/developer-survey/), it’s safe to say respondents are _frustrated_ with software testing.\n\n_“Testing can be slow in both writing and running.”_\n\n_“Testing delays everything.”_\n\nWhile there is forward momentum (almost 25% of teams say they’re fully automated - more than double the number from 2020), the same percentage reported zero automation or that they’re just beginning to think about it. 
\n\n_“Automated testing is ignored ‘due to time constraints.’”_\n\nBut even teams that haven’t ignored automated software testing are hamstrung because the vast majority don’t give developers scan results **within their IDEs.** Fewer than 25% of teams enable [SAST](/blog/developer-intro-sast-dast/) lite scanners in a web IDE and only 20% put results in a web pipeline report for developers. The situation is even worse when it comes to DAST, dependency and container scans: just 16% make DAST/dependency scan data available, and 14% do the same for container scans. While the percentage of teams reporting full automated software testing increased from 2020 to 2021, the percentage giving devs access to key test data barely changed in the same time frame.\n\n## Context switching makes everything hard\n\nThe fact that developers can’t easily get access to test results is a huge productivity blocker. “The best time to (fix bugs) is when I’m in \"flow\" - right when I’m writing the code and have a mental model of all of the things and how they are interconnected,” explained [Brendan O’Leary](/company/team/#brendan), senior developer evangelist at GitLab, in a blog post last year talking about [the developer-security divide](/blog/developer-security-divide/). “So that’s basically the same day or same week as when I wrote it.”\n\n**Elevating your DevOps skills? Join us at [Commit at KubeCon](/events/commit/) - Oct. 11!**\n\nSo while not getting results “in the flow” is a huge stumbling block, developers are adamant about the importance of testing. When we asked developer respondents in our 2021 Survey what they wished they could do more of, testing was, by far, the number one response. \n\nWhat’s the solution to this conundrum? 
More automation, [more AI/ML](/blog/ai-in-software-development/) and a [DevOps platform](/solutions/devops-platform/) to make everything seamlessly interconnected, visible and actionable.\n\nGeo-sharing company Glympse offers an object lesson on [the benefits of a DevOps platform](/customers/glympse/). The company was using approximately 20 tools to get its software out the door, but after moving to GitLab’s DevOps Platform, the process was dramatically streamlined. Deployments have dropped from four hours to  less than 30 minutes, and deployment fatigue, particularly around testing and code reviews, has vanished. \n\n## The struggle is real, but worth it\n\nFor teams who’ve tamed the automated software testing beast, and are humming along in their DevOps practices, the benefits are substantial. Here’s what they told us in our 2021 Survey:\n\n_“We are not relying on developers to have remembered to create and run tests for their code before deploying.”_\n\n_“We automate everything possible, to be able to test our product ‘like in real life’ without any downside. This increases confidence and simplifies tests for everything.”_\n\n_“Integration testing has been a big plus in how confident we are to release automatically and deliver a version. 
We are now able to deliver any day.”_\n\n_“It helps that devs don't need to keep track of test running; they just need to push and pipeline will check their code before merge to master.”_\n",[1158,9,681],{"slug":8536,"featured":6,"template":686},"want-faster-releases-your-answer-lies-in-automated-software-testing","content:en-us:blog:want-faster-releases-your-answer-lies-in-automated-software-testing.yml","Want Faster Releases Your Answer Lies In Automated Software Testing","en-us/blog/want-faster-releases-your-answer-lies-in-automated-software-testing.yml","en-us/blog/want-faster-releases-your-answer-lies-in-automated-software-testing",{"_path":8542,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8543,"content":8549,"config":8555,"_id":8557,"_type":14,"title":8558,"_source":16,"_file":8559,"_stem":8560,"_extension":19},"/en-us/blog/want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together",{"title":8544,"description":8545,"ogTitle":8544,"ogDescription":8545,"noIndex":6,"ogImage":8546,"ogUrl":8547,"ogSiteName":670,"ogType":671,"canonicalUrls":8547,"schema":8548},"Developing secure software: Top tips for dev-sec integration","Every DevOps team wants secure software development but it's surprisingly hard to achieve. Here are 5 strategies to bring dev and sec together.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679444/Blog/Hero%20Images/twotogether.jpg","https://about.gitlab.com/blog/want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Want secure software development? 
Our top 5 tips to bring dev and sec together\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-01-10\",\n      }",{"title":8550,"description":8545,"authors":8551,"heroImage":8546,"date":8552,"body":8553,"category":679,"tags":8554},"Want secure software development? Our top 5 tips to bring dev and sec together",[851],"2022-01-10","\nThe most productive DevOps teams achieve secure software development by baking sec in from the start. That’s a worthwhile goal, but the reality is developers and security teams don’t always get along. From squabbles around where the buck stops to finger-pointing about finding and fixing bugs, dev and sec often struggle to get on the same page.\n\nAt a time when the security stakes have never been higher, dev and sec simply have to figure it out.\n\nHere are our top five tips to [bridge the gap between dev and sec](/blog/developer-security-divide/) and truly welcome security into the DevOps fold.\n\n**1. Forget the past**\n\nIn the bad, old days, a security officer swooped in when code was hitting production to point out problems and demand changes, often with little to no context or explanation. Developers didn’t exactly jump all over themselves to cooperate. TL;DR there’s plenty of blame to explain the lack of secure software development.\n\t\nThankfully, DevOps and modern application development bring fresh narratives and workflows. Nearly 28% of security pros now work in cross-functional DevOps teams, according to our [2021 Global DevSecOps Survey](/developer-survey/). And over 70% have shifted security left, the survey found. \n\nWhat’s the secret to their success? It’s all about DevOps and the [technology changes required to do it successfully](https://about.gitlab.com/blog/elite-team-strategies-to-secure-software-supply-chains/). 
Our survey found that teams settled on DevOps for better code quality and faster release times, but the tech choices to support that success – automated testing, security scans, and shift-left security – actually ended up bringing dev and sec closer together.\n\n_The takeaway_: The right technology is surprisingly helpful in breaking down stereotypes.\n\n**2. Learn each other’s languages**\n\nClearly, dev and sec have an ongoing communication problem. \n\nIn fact, they can’t even agree on who “owns” security, as we saw in our survey. A sec pro told us, “Security must be a practice of every member of the team from the front-end developer to the system administrator (and also non-tech roles),” while a dev said, “It’s all up to the developer!”\n\nWork needs to happen, and it starts with the very old-fashioned concept of getting to know one another. A sec pro could attend a developer meet-up, and a dev could sit in on a security retro. For some teams, this is going to have to be a forced function where management mandates cross-functional “lunch and learns,” virtual offsites, or even ice breakers.\n\n_The takeaway_: Yes, even an escape room (or other bonding exercises) can [help a team start to speak the same language](https://blog.hslu.ch/majorobm/2019/03/15/escape-rooms-a-great-team-building-activity/).\n\n**3. Institute a security champions program**\n\nIf you can’t beat them, join them, or in this case, embed them. [Developer security champions]( https://devops.com/devops-security-champion-who-what-and-why/) are known and trusted devs who have an interest and enthusiasm for security and want to share it with colleagues. This can be a very successful strategy to actually shift security left and change mindsets forever. \n\nSecurity champions can be part of a formalized program led by the sec team, or grow in a more organic fashion via an enthusiastic dev. 
Either way, [experts suggest this is a solid way to bring a DevOps team to DevSecOps](/blog/why-security-champions/).\n\n_The takeaway_: Sometimes the message is heard and understood most clearly from an insider.\n\n**4. Meet dev and sec where they are**\n\nIt’s tough to hold a dev accountable for security problems when the vast majority of them aren’t taught about it in college. And sec pros don’t necessarily know how to code. So is it any surprise that two very different skill sets, degree programs, and job requirements might find it hard to come together?\n\nIt’s not surprising but it is problematic. [One solution](https://techbeacon.com/security/why-developers-dislike-security-what-you-can-do-about-it) involves both sides (figuratively) going back to school. Devs can get hands-on training in security, while sec pros learn how to code.\n\nAlso DevOps managers might consider adding [“security software developer”](https://cybersecurityguide.org/careers/security-software-developer/) to the 2022 roster. This fairly new job title has [over 1,000 postings on Glassdoor.com](https://www.glassdoor.com/Job/united-states-security-software-engineer-jobs-SRCH_IL.0,13_IN1_KO14,40.htm?clickSource=searchBox).\n\n_The takeaway_: Continuing education and cross-functional training can yield enormous benefits.\n\n**5. Make the experience real**\n\nActions can speak louder than words, so why not let developers experience, first-hand, what’s involved in a security breach (and, by implication, what the stakes are)? Invite devs to every hacking exercise planned, and get extra points if a [security red team](https://csrc.nist.gov/glossary/term/red_team_blue_team_approach) is involved. \n\t\nAt the same time, introduce security pros to the user experience (UX) team, and invite them to meet with actual users and hear real-time feedback. 
\n\n_The takeaway_: It’s impossible to feel anything but invested if you truly feel like you’re part of the process.\n",[9,875,683],{"slug":8556,"featured":6,"template":686},"want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together","content:en-us:blog:want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together.yml","Want Secure Software Development Our Top 5 Tips To Bring Dev And Sec Together","en-us/blog/want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together.yml","en-us/blog/want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together",{"_path":8562,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8563,"content":8568,"config":8573,"_id":8575,"_type":14,"title":8576,"_source":16,"_file":8577,"_stem":8578,"_extension":19},"/en-us/blog/ways-to-encourage-collaboration",{"title":8564,"description":8565,"ogTitle":8564,"ogDescription":8565,"noIndex":6,"ogImage":6184,"ogUrl":8566,"ogSiteName":670,"ogType":671,"canonicalUrls":8566,"schema":8567},"3 Ways to foster collaboration","Want to know how we encourage everyone to contribute?","https://about.gitlab.com/blog/ways-to-encourage-collaboration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Ways to foster collaboration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-06-12\",\n      }",{"title":8564,"description":8565,"authors":8569,"heroImage":6184,"date":8570,"body":8571,"category":2241,"tags":8572},[1378],"2017-06-12","\n\nWe know that [collaboration is critical](/blog/why-collaboration-tools-matter/) for organizations moving towards a DevOps culture. Here's how we encourage collaboration in our workflow at GitLab.\n\n\u003C!-- more -->\n\n## 1. 
We make suggesting changes less scary\n\nUsing version control for more than just your source code means that everyone feels free to contribute to documentation, configurations, tests and whatever else you're working on. With the benefit of [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/), it's possible to suggest a change or an improvement, or even just query something that isn't entirely clear or could be described better, without just going ahead and making the change immediately. This invites discussion and prevents less experienced team members from feeling nervous to voice their opinions.\n\n>\"It really makes the documentation, similar to the source code, an open source and living document that everyone can contribute to.\" – GitLab Platform Backend Lead, [Douwe Maan](/company/team/#DouweM)\n\n## 2. We open our development platform\n\nBy giving everyone in your organization access to view what other teams are working on, you allow everyone to discover and contribute beyond their own projects. This [inner sourcing](https://en.wikipedia.org/wiki/Inner_source) approach makes it more likely that team members can learn from others or offer suggestions from their own experience that could be applied to a different project, avoiding duplication of work. Douwe explains: \"It's working together to make all of our code better, because if we use a shared library – even if it’s just an internal one – if one person improves it or fixes a bug or increases the functionality of that application, that’s work by one person that will immediately affect all the different teams.\"\n\n## 3. We make code review impersonal\n\nEveryone is encouraged to [review each other's code](https://www.youtube.com/watch?v=XluG9mAQdSo&feature=youtu.be) or ask for input, and the focus of that review is firmly on improving the code. The approach is not to say, \"This is wrong, change it to this,\" which can be really demotivating. 
We use language like, \"Have you considered this?\" or \"What do you think about this?\"\n\nThis not only makes code review less scary for the person whose merge request is being reviewed, it also makes it less intimidating for other team members to weigh in on more senior team members' work.\n\n>\"Review is really something we all do together. Even the most junior person or just someone who doesn’t really know this part of the application yet, if they see something that doesn’t quite look right to them or something they might have a question about, it’s really useful if you make them feel free to comment on that.\" - Douwe\n\nBy removing the barriers to contribution and making it easy and encouraged to offer input, even where team members have less experience, we've built a culture around collaboration and learning from others' expertise. Fostering collaboration across different teams and functions is just one element of a DevOps culture – to learn more, watch our webcast, \"[Managing the DevOps Culture Shift](https://www.youtube.com/watch?v=py8c6-3zyKM&feature=youtu.be)\" on demand now.\n\n*How does your team encourage everyone to contribute? 
Tell us in the comments!*\n\n\u003C!-- cover image: https://unsplash.com/search/street-art?photo=PVw_vtpCGaM-->\n",[999,749,683,9],{"slug":8574,"featured":6,"template":686},"ways-to-encourage-collaboration","content:en-us:blog:ways-to-encourage-collaboration.yml","Ways To Encourage Collaboration","en-us/blog/ways-to-encourage-collaboration.yml","en-us/blog/ways-to-encourage-collaboration",{"_path":8580,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8581,"content":8587,"config":8592,"_id":8594,"_type":14,"title":8595,"_source":16,"_file":8596,"_stem":8597,"_extension":19},"/en-us/blog/we-are-building-a-better-heroku",{"title":8582,"description":8583,"ogTitle":8582,"ogDescription":8583,"noIndex":6,"ogImage":8584,"ogUrl":8585,"ogSiteName":670,"ogType":671,"canonicalUrls":8585,"schema":8586},"We are very far from a better Heroku for production apps in a hyper cloud","GitLab is building Heroku for production apps in hyper clouds, integrated into your DevSecOps workflow: The 5 minute production app.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672405/Blog/Hero%20Images/spacex-unsplash.jpg","https://about.gitlab.com/blog/we-are-building-a-better-heroku","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We are very far from a better Heroku for production apps in a hyper cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2021-03-22\",\n      }",{"title":8582,"description":8583,"authors":8588,"heroImage":8584,"date":8589,"body":8590,"category":1359,"tags":8591},[2473],"2021-03-22","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n> Update: This post does not live up to its original title `We are building a better Heroku`. It shows my own personal experience and reflects poorly on competitors. I am sorry about that.\n>\n> It should have emphasized the _building_ part, we're just starting. 
The current 5 minute production app doesn't hold a candle to Heroku at the moment.\n> It should have made it clear the goals is to improve the speed with which you can configure a production app, not a development app. Development apps on Heroku are already close to perfect. The examples in this post are contrived since it talks about a development app, as [rightly called out by Heroku people](https://twitter.com/johnbeynon/status/1374306499426652161).\n> It should have gone into [why hyper clouds might be preferable](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#hypercloud).\n> It should have talked about state, we made a small improvement in [this MR](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/78028/diffs) but we should have done the [planned work](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/11137) and made one post out of it.\n>\n> We are very far from a better Heroku for production apps in a hyper cloud.\n\nCreating a web application has become very convenient and easy. You’ll start in your local development environment, run a dev server and verify the changes looking good. At a certain point, you want to share it with your friends on the internet. A service or server?\n\n### Use Heroku\n\nI have been a backend developer in the past 20 years. Web development is often fighting with Javascript and CSS. 
Especially Heroku as a deployment platform is a new area for me.\n\nLet's start with creating an account, login, and follow the web instructions to create a new app in the [documentation](https://devcenter.heroku.com/).\n\nLet’s try a fun demo, a battleship game to learn Javascript on the client and NodeJS on the server.\n\n```\n$ cd ~/dev/opensource\n$ git clone https://github.com/kubowania/battleships\n$ cd battleships\n```\n\nTest it locally, optional.\n\n```\n$ npm install\n$ npm start\n```\n\nInstall the Heroku CLI, on [macOS with Homebrew](/blog/dotfiles-document-and-automate-your-macbook-setup/).\n\n```\n$ brew install heroku/brew/heroku\n\n$ heroku autocomplete\n```\n\nThis opens a new browser window to login. Lets create an app.\n\n```\n$ heroku create\nCreating app... done, ⬢ nameless-mountain-48655\nhttps://nameless-mountain-48655.herokuapp.com/ | https://git.heroku.com/nameless-mountain-48655.git\n```\n\nThe CLI command adds a new Git remote called `heroku` where we need to push into.\n\n```\n$ git push heroku main\n\nremote: -----> Launching...\nremote:        Released v3\nremote:        https://nameless-mountain-48655.herokuapp.com/ deployed to Heroku\nremote:\nremote: Verifying deploy... done.\n```\n\nDeployed in less than 5 minutes. Getting there and installing the pre-requisites on the CLI took longer than expected.\n\n![Battleship web app deployed with Heroku](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/battleship_heroku.png){: .shadow.medium.center}\n\nLots of CLI commands involved, and it did not run in a CI/CD pipeline with additional tests before deploying it. Now the web application is deployed into a black box. Want to use Let’s Encrypt and your own domain name? How about adding the deployment natively to GitLab to have a single application in your DevOps workflow?\n\n#### Setting up Persistence with Heroku\n\nThis gets more challenging. 
Imagine that your app uses a relational database, a caching layer and object storage. This requires lots of CLI commands and a deep dive into the application configuration. We did not touch persistent backends in the demo app above yet.\n\nHeroku offers [PostgreSQL](https://devcenter.heroku.com/categories/postgres-basics), [Redis](https://devcenter.heroku.com/categories/heroku-redis) and [AWS S3](https://devcenter.heroku.com/articles/s3).\n\n```\nheroku addons:create heroku-postgresql:hobby-dev\nheroku addons:create heroku-postgresql:hobby-dev --version=10\n\nheroku pg:promote HEROKU_POSTGRESQL_YELLOW\n```\n\n```\nheroku addons:create heroku-redis:hobby-dev -a 5-min-prod-app\n```\n\nNote that the default `hobby-dev` plan allows unencrypted connections too.\n\n```\nheroku config:set S3_BUCKET_NAME=appname-assets\nheroku config:set AWS_ACCESS_KEY_ID=xxx AWS_SECRET_ACCESS_KEY=yyy\n```\n\nAll stateful backends in Heroku need to be secured. This requires more commands to create self-signed certificates and encrypt transport layers in the backend.\n\nAfter all, is there a better way to automate requesting stateful backend services and automate their provisioning?\n\n### A better Heroku: The 5 minute production app\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">the modern tech industry is basically folks just endlessly remaking remakes of heroku\u003C/p>&mdash; Always Miso (@monkchips) \u003Ca href=\"https://twitter.com/monkchips/status/1368924845740810249?ref_src=twsrc%5Etfw\">March 8, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Truth \u003Ca href=\"https://t.co/AFN9anBbQG\">https://t.co/AFN9anBbQG\u003C/a>\u003C/p>&mdash; Sid Sijbrandij (@sytses) \u003Ca 
href=\"https://twitter.com/sytses/status/1368982067229253632?ref_src=twsrc%5Etfw\">March 8, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n\nCloud resources are cheap. AWS offers a free tier, HashiCorp Terraform has become an excellent tool to manage multi-cloud resources and GitLab integrates app packaging, container registry, deployment and TLS certificates.\n\nThere’s more application goodies: Provision a PostgreSQL VM, add Redis, SMTP email transport, custom domains with Let’s Encrypt.\n\n#### Use the 5 minute production app\n\nThe [documentation](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#usage) says to create a new AWS IAM role with credentials for automation.\n\nThe second step is to have the source code available in a GitLab project. You can use `New project > Import project > Repo by URL` to automatically import the GitHub repository `https://github.com/kubowania/battleships.git`.\n\n![Import the GitHub repository into GitLab](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/gitlab_new_project_import_github_url.png){: .shadow.medium.center}\n\nOnce imported, navigate into `Settings > CI/CD > Variables` to specify the AWS credentials and region. Ensure to tick the `Masked` checkbox to hide them in all job logs.\n\n![Configure AWS credentials as masked CI/CD variables](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/gitlab_5minprodapp_aws_cicd_variables.png){: .shadow.medium.center}\n\nNavigate back into the project overview. Click the `Setup CI/CD` button or open the Web IDE to create a new `.gitlab-ci.yml` file. 
Add the remote CI/CD template include like this:\n\n```\nvariables:\n    TF_VAR_DISABLE_POSTGRES: \"true\"\n    TF_VAR_DISABLE_REDIS: \"true\"\n\ninclude:\n  remote: https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/raw/stable/deploy.yml\n```\n\nThe battleship application does not need the PostgreSQL and Redis backends. They are disabled with setting `TF_VAR_DISABLE_POSTGRES` and `TF_VAR_DISABLE_REDIS` [variables](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/VARIABLES.md) to `false`.\n\nCommit the change to the default branch.\n\n8:43pm CET: Pipeline started with the build job. 2 min 33 sec.\n\n![GitLab pipeline builds the Docker image with Auto-Build](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/gitlab_5minprodapp_pipeline_01.png){: .shadow.medium.center}\n\n8:45pm CET: Pipeline runs terraform_apply to provision AWS resources in 2min 47 sec.\n\n![GitLab pipeline runs Terraform to provision cloud resources in AWS](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/gitlab_5minprodapp_pipeline_02.png){: .shadow.medium.center}\n\n8:48pm CET: Deployed in 1 min 11 sec.\n\nThe deploy job log greets with the URL in ~5 minutes, including a Lets Encrypt TLS certificate. There we go, let’s play some battleship!\n\n![Battleship web app deployed in AWS with the 5 minute production app](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/battleship_5minprodapp_aws.png){: .shadow.medium.center}\n\nNote that we never left the browser and there is no CLI involved. Next to the included template, there’s also room for adding more CI tests and security best practices while hacking on this project. You can navigate into your AWS console for debugging and troubleshooting and plan with production budgets, where needed.\n\n#### Setting up Persistence with the 5 Minute Production App\n\nRemember the stateful backends with Heroku above? 
By default, the 5 minute production app takes care of provisioning:\n\n- PostgreSQL server and secured backend\n- Redis cluster\n- S3 object storage in AWS\n\nThe 5 minute production app uses the managed stateful services of a hypercloud so your data is persisted and secure. By leveraging these managed services (databases, caching, objects storage, etc.) you have less to maintain. Everything is provisioned through Terraform which has the following advantages:\n\n- Terraform is the most popular IaC tool.\n- Terraform works accross platforms.\n- Terraform is well-documented.\n- Terraform state can be [stored and viewed in GitLab](https://docs.gitlab.com/ee/user/infrastructure/#gitlab-managed-terraform-state).\n- You avoid the cost and complexity of Kubernetes.\n- You have complete control to customize and extend.\n\nWe will explore more stateful backends in future apps and blog posts.\n\n### 5 minute production app + DevSecOps = ❤️\n\nExample for [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) and [SAST](https://docs.gitlab.com/ee/user/application_security/sast/analyzers.html):\n\n```\ninclude:\n  - remote: https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/raw/stable/deploy.yml\n  - template: Dependency-Scanning.gitlab-ci.yml\n  - template: Security/SAST.gitlab-ci.yml\n```\n\n### More to use: Database backends, TLS, environments\n\nThis blog post covers the basic learning steps with Heroku and the 5 minute production app. A typical web app requires a database, storage or caching backend, which can get complicated to run with Heroku. We will explore the setup and production experience in future blog posts. 
In addition to backends, we will also look into TLS certificates and production environments in CD workflows.\n\nMeanwhile, try the 5 min production app yourself:\n\n* [5 minute production app docs](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#the-5-minute-production-app)\n* [Example projects](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#examples)\n* Your own future web app with [your custom domain](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#custom-domain)?\n\nCover image by [SpaceX](https://unsplash.com/@spacex) on [Unsplash](https://unsplash.com/photos/OHOU-5UVIYQ)\n\n",[1041,9,977],{"slug":8593,"featured":6,"template":686},"we-are-building-a-better-heroku","content:en-us:blog:we-are-building-a-better-heroku.yml","We Are Building A Better Heroku","en-us/blog/we-are-building-a-better-heroku.yml","en-us/blog/we-are-building-a-better-heroku",{"_path":8599,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8600,"content":8606,"config":8612,"_id":8614,"_type":14,"title":8615,"_source":16,"_file":8616,"_stem":8617,"_extension":19},"/en-us/blog/welcome-kde",{"title":8601,"description":8602,"ogTitle":8601,"ogDescription":8602,"noIndex":6,"ogImage":8603,"ogUrl":8604,"ogSiteName":670,"ogType":671,"canonicalUrls":8604,"schema":8605},"Why the KDE community is #movingtogitlab","Open source software community giant KDE finished phase one of their migration to GitLab and has joined our GitLab open source program. 
Check out what's next for KDE and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681382/Blog/Hero%20Images/migratingbirds.jpg","https://about.gitlab.com/blog/welcome-kde","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why the KDE community is #movingtogitlab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nuritzi Sanchez\"}],\n        \"datePublished\": \"2020-06-29\",\n      }",{"title":8601,"description":8602,"authors":8607,"heroImage":8603,"date":8609,"body":8610,"category":1318,"tags":8611},[8608],"Nuritzi Sanchez","2020-06-29","\n\nThe [KDE community](https://kde.org/) is [#movingtogitlab](https://twitter.com/hashtag/movingtogitlab)! After announcing the original decision to migrate to GitLab in November 2019, KDE has officially completed phase one of their migration, and contributors have begun to use GitLab on a daily basis at invent.kde.org. Read on to learn more about KDE's migration story.\n\n## About KDE\n\nKDE is an international community that creates open source software for desktops and mobile devices. KDE software is compatible with multiple platforms, including GNU/Linux, FreeBSD, Windows, macOS, and Android. Their products are used by millions of home and office workers and are being deployed in schools around the world.\n\nWith more than 2,700 artists, designers, programmers, translators, writers, and other contributors from across the globe, the KDE community is thriving.\n\nTogether, this community creates and maintains more than 200 applications and countless add-ons, plugins, and Plasmoids, 1000+ repositories, 80+ frameworks for Qt developers, and more than 2,600 projects. 
KDE software is translated into more than 100 languages to enable vast global reach.\n\n## Why KDE moved to GitLab\n\nOne of the main reasons that KDE decided to move to GitLab is to improve the newcomers story and make it easier to start contributing to KDE software. As [Aleix Pol](https://ev.kde.org/corporate/board/), President of KDE e.V says, \"Adopting GitLab has been a natural next step for us. Simplifying the onboarding experience for new contributors is one of our main goals in the KDE community. Being able to allow project contributors to easily participate in how the products they maintain are tested and delivered will certainly be a turning point for our ecosystem.\"\n\n\"By using a platform offering an interface and workflow that most open source developers are nowadays familiar with, we are confident that we are lowering the bar for new contributors to join us, and are providing the foundation for our community to scale in the following years,\" added [Neofytos Kolokotronis](https://ev.kde.org/corporate/board/), member of KDE e.V.'s Board of Directors and a core member of KDE's Onboarding team.\n\nAnother important consideration for the KDE community was to move to a product that was well-supported and where feedback from the community would be taken into account. With a release every month, GitLab has fast-paced development and is actively maintained by the company and community alike. Community members help to shape the way the product is built, and there's an [open roadmap](/direction/) since [transparency is one of GitLab's core values](https://handbook.gitlab.com/handbook/values/#transparency).\n\nMoving to new tools is a lot of work for established communities like KDE. 
Migration decisions require careful communication and the complex task of gathering community consensus.\n\nThe KDE team made the decision to migrate away from its [former tech stack](https://gitlab.com/gitlab-org/gitlab/-/issues/24900#gitlab-replacements) after following a series of carefully designed steps. First, they talked to the sysadmin team and then formed a migration team to evaluate the move. Next, the sysadmin team completed a thorough study of GitLab's features and did an intake and comparison of the community's needs against those product features. Then, they created a process that allows KDE to run short test cycles with some projects, document the process, and provide feedback to the community.\n\nThe migration started by moving some smaller and more agile KDE teams that were very interested in testing and providing feedback. After this cycle was completed successfully, KDE started migrating teams with a larger codebase and more contributors. Once all of the major issues were resolved, they made the final switch for all remaining projects they planned to move. The sysadmin team documented the results after each step and shared them directly with the KDE community to receive feedback and gather consensus on how to proceed.\n\nAs the switch to GitLab fell directly under the scope of KDE's [\"Streamlined Onboarding of New Contributors\" goal](https://community.kde.org/Goals/Streamlined_onboarding_of_new_contributors), the KDE Onboarding team was also involved from the start, working very closely with the sysadmin team, who were leading the effort. The community was involved in the decision-making from the beginning, and stayed up-to-date on each phase of the migration, and all questions and concerns were answered and addressed along the way.\n\n\"This was a major change for us, but we are very satisfied with how our community collaborated over long discussion threads. 
We believe that by working together we made the best decisions as we moved forward,\" says Neofytos.\n\n## Migration challenges and solutions\n\nThe biggest challenge for KDE was the sheer volume of data they were dealing with and how it was integrated into the numerous tools in use (including [Phabricator](https://www.phacility.com/phabricator/)). With more than 1,000 repositories, this migration was a big undertaking.\n\nTo address this challenge, KDE decided to approach the migration in phases rather than do it all at once. By phasing the migration, they were able to deal with different data types, such as repositories and tasks, separately.\n\nKDE developed custom tools to make bulk updates easier throughout the migration process. These tools help set the name, description, and avatar of the projects alongside a number of settings, for example, protected branches, and merge methods. By using these custom tools for bulk updates, KDE was also able to avoid granting maintainer access to individual contributors. KDE only allows maintainer access for sysadmins per their access and permissions policy.\n\nKDE ported custom Git hooks to ensure that certain checks and actions continued after the move to Gitlab. These include checks to ensure file encodings match KDE requirements and that bugs on their Bugzilla installation were closed as needed.\n\nIn order to support their translation community, which still uses Subversion in their workflow, KDE also built tooling to export SSH keys from GitLab to avoid the need to update these in two places.\n\nKDE also adjusted the tools used to build and develop KDE software to make them compatible with the new repository structure in GitLab.\n\nAt this point, KDE overcame most of their migration hurdles. 
Once the preparation work was finished to clean up a number of systems to work more natively with GitLab, the actual migration took about one day.\n\nBut there are a few more challenges left before KDE can transition continuous integration (CI) and task management over to GitLab. To follow along with the KDE migration, you can take a look at the [list of issues that KDE is tracking](https://gitlab.com/gitlab-org/gitlab/-/issues/26581).\n\n## Architectural decisions\n\nA common challenge for organizations moving to GitLab is deciding how to structure their groups to best enable their community's workflows and allow them to abide by their policies.\n\nKDE decided to tackle this challenge by setting up a series of groups at the top level of GitLab to act as categories. KDE's 1,200 repositories were then sorted into each of these categories.\n\nKDE formed this architectural strategy to help make projects more discoverable. KDE wanted to avoid the impracticality of people needing to scroll endlessly through repositories. Setting up top-level categories also allows developers to get an easier overview of merge requests for the categories they are most interested in.\n\nWith regards to permissions, KDE uses a single master \"KDE Developers\" group to manage membership and permission levels. Everyone there is given \"Developer\" access. This group is then invited to all of the groups containing repositories except for the ones containing the KDE website and infrastructure repos. This method of dealing with permissions allows KDE to maintain a single source of truth.\n\n## GitLab + KDE = ❤️\n\nKDE is using the [Community Edition](/install/ce-or-ee/) of GitLab because of their commitment to open source. They are a member of our [GitLab for Open Source](/solutions/open-source/) program, and have been actively collaborating with GitLab team members throughout the migration. 
One of the benefits of using
To learn more about joining the KDE community, visit their \"[Get Involved](https://community.kde.org/Get_Involved)\" page, which offers guidance to all contributors from all backgrounds.\n\n",[682,1829,9],{"slug":8613,"featured":6,"template":686},"welcome-kde","content:en-us:blog:welcome-kde.yml","Welcome Kde","en-us/blog/welcome-kde.yml","en-us/blog/welcome-kde",{"_path":8619,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8620,"content":8626,"config":8631,"_id":8633,"_type":14,"title":8634,"_source":16,"_file":8635,"_stem":8636,"_extension":19},"/en-us/blog/welcome-to-the-devops-platform-era",{"title":8621,"description":8622,"ogTitle":8621,"ogDescription":8622,"noIndex":6,"ogImage":8623,"ogUrl":8624,"ogSiteName":670,"ogType":671,"canonicalUrls":8624,"schema":8625},"Welcome to the DevOps Platform era","GitLab CEO Sid Sijbrandij reflects on the evolution of DevOps and the emergence of the DevOps Platform as the solution for businesses wanting to deliver software faster, more securely, and at a lower cost.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668101/Blog/Hero%20Images/dop_cover.png","https://about.gitlab.com/blog/welcome-to-the-devops-platform-era","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Welcome to the DevOps Platform era\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2021-08-03\",\n      }",{"title":8621,"description":8622,"authors":8627,"heroImage":8623,"date":8628,"body":8629,"category":769,"tags":8630},[1609],"2021-08-03","\nDevOps has evolved since its infancy, over a decade ago. Swiss developmental psychologist Jean Piaget believed human cognitive development has [four stages](https://www.healthline.com/health/piaget-stages-of-development) (sensorimotor, preoperational, concrete operational, and formal operational). 
Through each of these stages, the human mind obtains new knowledge while building and modifying memories to inform one's understanding of the world around them.\n\nIn the same way that people go through stages as they grow, markets and industries also go through stages of development. Over the years, DevOps has grown into a mature, business-critical practice.\n\nAs the DevOps industry expanded, so did the number and complexity of tool-project integrations within an organization. This was the result of three developments in DevOps:\n\n1. Companies moved from monolithic architectures to [microservices architectures](/topics/microservices/). By doing so, applications could scale independently, allowing teams to move faster.\n2. The faster delivery of software also required companies to use more DevOps tools per project.\n3. The linear growth of both or more projects and more tools per project led to an exponential increase in the number of project-tool integrations.\n\nThis increase in project-tool integrations called for a change in the way organizations adopted DevOps tools. At GitLab, we identified four phases of evolution in the adoption of DevOps tools over time.\n\n## Phase 1 - Siloed DevOps\n\nIn this early phase, each department or team built or purchased their own tools in isolation, which they optimized for their own narrow objectives, without explicitly coordinating with others. This led to a \"Siloed DevOps\" environment that caused problems when teams tried to work together because they were not familiar with the tools of the other teams. It is common for organizations at this level of maturity to have multiple duplicative sets of tooling for common DevOps functions like planning, source code management, and CI/CD. The chaotic environment slows down collaboration and knowledge sharing or stops it altogether.\n\n## Phase 2 - Fragmented DevOps\n\nThe need for less chaos and more harmony drove organizations to the second phase, Fragmented DevOps. 
In this phase, organizations standardized on the same set of tools across the organization. Typically, there was one preferred tool for each stage of the DevOps lifecycle. Teams within the same function could collaborate better, but the tools were not connected between stages. As an example, planning was standardized and deployment was standardized, but each stage was still siloed from each other. It was hard to move through the DevOps lifecycle.\n\n## Phase 3 - DIY DevOps\n\nOrganizations that tried to remedy this by manually integrating their DevOps point solutions together reached the third phase, \"DIY DevOps\". Unfortunately – as many DIYers will know all too well – when you try to put together many different parts that were never designed to work with each other, the end results never fit quite right. In the same way, homegrown toolchains create complex workflows that slow down the development process — and overall cycle time. For many organizations, maintaining DIY DevOps toolchains requires significant effort, resulting in higher costs, slower cycle times, and opportunities for vulnerabilities to be targeted.\n\n## Phase 4 - The DevOps Platform era\n\nThe true potential of DevOps was not fully realized in the first three phases. That's why I am proud that GitLab is the leader in enabling the fourth phase, the DevOps Platform era. [The DevOps Platform](/topics/devops-platform/) is a single application with one user interface and a unified data store. It includes every stage of the DevOps lifecycle and brings together development, operations, and security teams. It allows these groups to collaboratively plan, build, secure, and deploy software. As a result, this improves businesses' velocity, efficiency, and security, allowing them to deliver software faster and at a lower cost.\n\n## The future of DevOps\n\nWhen I think about the future of DevOps, three things stand out. First, I believe that a platform solution with embedded security _ is_ the future. 
Security that is built-in, not bolt-on, is needed to secure a software supply chain from end-to-end without sacrificing speed for security.\n\nFor example, the world's most trusted hacker-powered security company, HackerOne, is using The DevOps Platform. With GitLab, they've been able to replace their DIY toolchain and shift security left. HackerOne is now catching security flaws early and getting immediate feedback since security is built into the developer's workflow.\n\nIn May, the U.S. government [issued a new policy](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/) aimed at securing both the private and public sector software supply chains against malicious cyberattacks. Now is the time to make security a fundamental part of your DevOps journey. In today's landscape, you need to secure 100% of your applications every time they get updated. The only practical way to do that is to integrate security into the platform.\n\nSecond, I believe that machine learning will be critical in making the DevOps workflow faster. In the [GitLab 2021 DevSecOps survey](/developer-survey/), 75% of respondents reported that their DevOps teams are using or planning to use machine learning or AI for testing and code review. In June, [GitLab announced the acquisition](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities.html) of a machine learning-based solution called UnReview. This acquisition and continued machine learning integration will automate workflows and compress the DevOps cycle time. GitLab is focused on using machine learning to reduce friction in your work, so you can spend more time innovating.\n\nThird, I believe DevOps platform adoption will accelerate. 
[Gartner predicts that by 2023](/press/releases/2020-12-09-gitlab-cited-as-representative-vendor-in-gartner-market-guide.html), 40% of organizations will have switched from multiple point solutions to a platform in order to streamline application delivery. Gartner's prediction is an increase from the base of 10% or less using a DevOps Platform in 2020. GitLab customers often tell us that DIY toolchains are too complicated. If you're feeling that way too, it's time to choose a path to simplicity. The fastest way to get there is with the DevOps Platform.\n\nYou don't need to rip and replace to get started. Many customers began their GitLab journey with Source Code Management and CI. When they were ready, GitLab helped them to replace the rest of their DIY DevOps. When _you're_ ready, GitLab will work with you and GitLab's partner ecosystem to help you achieve your DevOps objectives on your schedule.\n\nJust like human cognitive development, DevOps has evolved thanks to combined experiences and new knowledge as it became available. I'm grateful to the innovators before us with the same goal: To make DevOps more efficient and collaborative.\n\n## Join us at GitLab Virtual Commit\n\nWant more DevOps? Tune in virtually at [GitLab Commit August 3-4, 2021](/events/commit/). 
Watch a video of the keynote address this blog post is based on:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/582282482\" width=\"640\" height=\"360\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C!-- blank line -->\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Making the case for a DevOps platform: What data and customers say](/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [It's time to build more accessible software. A DevOps platform can help](/blog/how-the-devops-platform-makes-building-accessible-software-easier/)\n",[9,726,728],{"slug":8632,"featured":6,"template":686},"welcome-to-the-devops-platform-era","content:en-us:blog:welcome-to-the-devops-platform-era.yml","Welcome To The Devops Platform Era","en-us/blog/welcome-to-the-devops-platform-era.yml","en-us/blog/welcome-to-the-devops-platform-era",{"_path":8638,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8639,"content":8645,"config":8652,"_id":8654,"_type":14,"title":8655,"_source":16,"_file":8656,"_stem":8657,"_extension":19},"/en-us/blog/welcoming-opencores-to-gitlab",{"title":8640,"description":8641,"ogTitle":8640,"ogDescription":8641,"noIndex":6,"ogImage":8642,"ogUrl":8643,"ogSiteName":670,"ogType":671,"canonicalUrls":8643,"schema":8644},"OpenCores come to GitLab","OpenCores moves to GitLab to accelerate digital design flow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669768/Blog/Hero%20Images/gitlab-opencores-oliscience.jpg","https://about.gitlab.com/blog/welcoming-opencores-to-gitlab","\n                        
{\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"OpenCores come to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrea Borga\"},{\"@type\":\"Person\",\"name\":\"David Planella\"}],\n        \"datePublished\": \"2019-12-03\",\n      }",{"title":8640,"description":8641,"authors":8646,"heroImage":8642,"date":8649,"body":8650,"category":299,"tags":8651},[8647,8648],"Andrea Borga","David Planella","2019-12-03","\n\n[OpenCores](https://opencores.org), the largest and most popular Gateware\ndevelopment community with over [300K members and 1200+\nprojects](https://opencores.org/about/statistics), is moving to GitLab. This\nis excellent news all around: as a catalyst for accelerating IP core development\nand for introducing GitLab to innovative uses in the scientific and electronic\ndesign communities.\n\n## Gateware and OpenCores\n\n![Gateware design flow diagram](https://about.gitlab.com/images/blogimages/welcoming-opencores-to-gitlab/gateware_flow.png \"Gateware flow\")\n\nOpenCores is a repository of reusable units of logic, open to use as building\nblocks for the electronics design community. 
These units are most commonly known as\n[Intellectual Property (IP)\ncores](https://en.wikipedia.org/wiki/Semiconductor_intellectual_property_core),\nand are described (coded), in [Hardware Description\nLanguage](https://en.wikipedia.org/wiki/Hardware_description_language) (HDL)\nfor the most part.\n\n### What is gateware?\n\nIn the semiconductor industry, these are the basic constituents of advanced\ndigital designs, collectively known as\n**gateware**: A layer in the electronics development chain positioned in\nbetween _hardware_ (such as a Printed Circuit Board – PCB – or a packaged chip),\nand _firmware_ (a set of decoded and executed instructions for a microprocessor).\n\n### What is OpenCores?\n\nThe [OpenCores portal](https://opencores.org) hosts the source code for a\nmultitude of digital gateware projects. In its more than 20 years of web history, it has\nevolved into a platform that enables its user community to discover, showcase,\nand manage such projects, including revision control for [source code](/solutions/source-code-management/).\n\nThe target devices for gateware have historically been\n[FPGA](https://en.wikipedia.org/wiki/Field-programmable_gate_array) (Field\nProgrammable Gate Arrays) and\n[ASIC](https://en.wikipedia.org/wiki/Application-specific_integrated_circuit)s\n(Application Specific Integrated Circuits), which allow building a vast range\nof hardware digital electronics appliances. These are often described as\n[SoC](https://en.wikipedia.org/wiki/System_on_a_chip) (System on a Chip).\n\nIn recent years, the OpenCores portal has been particularly focused on hosting FPGA\napplications, with the intention to enlarge the pool of available cores based\non emerging hardware description methods, such as\n[HLS](https://en.wikipedia.org/wiki/High-level_synthesis) (High-level\nsynthesis).\n\nOpenCores is also the place where digital designers meet to showcase, promote,\nand talk about their passion and work. 
They do this through forums, news feeds,\nand much more!\n\n### Who maintains the OpenCores portal?\n\n[Oliscience](http://oliscience.nl/) (open logic interconnects science) act as\nthe stewards of the OpenCores community and its portal. Oliscience is an\ninitiative originated from the\n[CERN](/customers/cern/)-Nikhef Business Incubation\nCentre (CERN-BIC@Nikhef), and is [supported](https://opencores.org/partners) by\n[Nikhef](https://www.nikhef.nl/en/), the Dutch National Institute for Subatomic\nPhysics, and [ASTRON](https://www.astron.nl/), the Netherlands Institute for\nRadio Astronomy.\n\nAs part of the stewardship charter, Oliscience is committed to maintaining and\nsupporting the OpenCores portal. This mission involves globally promoting its\ncommunity, fostering the use of open standards and practices, actively\ndeveloping the portal infrastructure and content, and more.\nThe [Wishbone bus](https://en.wikipedia.org/wiki/Wishbone_(computer_bus)),\nused throughout OpenCores designs, is one of the most well-known examples.\n\n## Leading change and embracing the DevOps culture for Gateware development\n\n[Moore's law](https://en.wikipedia.org/wiki/Moore%27s_law) is slowing down, and\nthe semiconductor industry is starting to experience a new resurgence. With a\nwave of new opportunities arising, FPGA is one of the key technologies that\nplay a crucial role in the future of computing architectures.\n\nThe barrier to entry for becoming a gateway developer is fairly higher than learning a new programming language as a software developer. As\nsuch, the digital electronics industry is continually striving to simplify\nthe approach to programmable logic.\n\nOpen Source IP Cores play a significant role in this goal. They unlock a\nvast knowledge pool that enables new gateware developers to start hacking on\nnew projects straight away. 
They can use existing solutions to draw knowledge\nvery quickly.\n\nIP Cores strive for quality, and quality calls for a structured way to assess\nthe content of a code bundle. This is where Continuous Verification (CV) comes into\nplay.\n\nIn the context of programmable logic, CV is a\nworkflow in which Gateware defined in a [HDL](https://en.wikipedia.org/wiki/Hardware_description_language)\nruns against standardized testbenches and benchmarked to assess and rank its\nquality. Full coverage for test cases and failure corner cases is guaranteed.\n\n## Accelerating digital design with GitLab\n\nThe OpenCores community leaders have strong ties to [CERN](https://home.cern/)\nand the [European Space Agency](https://www.esa.int/). Both are leading\nresearch organizations committed to supporting their respective scientific\ncommunities, which use GitLab for internal development.\n\nBoth organizations and the electronics industry in general are particularly\ninterested in a better assessment of the quality of gateware products, as their\nusage in industrial and commercial applications continues to increase at an\naccelerated rate. When you launch a satellite into space, you can't just press\nthe reset button if there is a bug!\n\nWhile talking to those teams, and hearing the preliminary exploration of\nimplementing CV practices into gateware design, [GitLab's integral CI/CD\nfeatures](/solutions/continuous-integration/) seemed a natural fit to pioneer the adoption of a DevOps approach to\ndigital design.\n\nSource control was also a feature that would enable engineers to share and\ncollaborate on their code in the public space. In summary, the benefits of a\nsingle application for the entire DevOps cycle, with the ultimate goal of\nreducing the gateware design cycle time made the decision easy.\n\nThe next objective for the OpenCores team is to implement a CV process in the\nOpenCores portal, starting with FPGA and until ASICs. 
It's an ambitious one,\nwhich requires ambitious partners.\n\nAndrea Borga, Oliscience CEO mentions:\n> we have a very strong scientific background, and we love to make experiments…\n> all the time! Exploring new ideas, and striving for impeccable execution are\n> embedded in our engineering way of thinking. You need innovative and\n> ambitious partners to achieve equally innovative and ambitious goals. This\n> is why we do what we do, and why we firmly believe GitLab's vision and spirit\n> strongly align with our own. This is how we chose to go with them.\n\nGitLab is thrilled to start working with the OpenCores team, to contribute to\nthat goal and welcoming them to a community that leading Open Source projects\nsuch as Drupal, GNOME, KDE, Debian, Freedesktop and many more are already a\npart of.\n\n[Cover image](https://www.flickr.com/photos/130561288@N04/39116042294/) by\n[Fritzchens Fritz](https://www.flickr.com/photos/130561288@N04/),\nlicensed under [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)\n{: .note}\n",[9,109,682,726,267],{"slug":8653,"featured":6,"template":686},"welcoming-opencores-to-gitlab","content:en-us:blog:welcoming-opencores-to-gitlab.yml","Welcoming Opencores To Gitlab","en-us/blog/welcoming-opencores-to-gitlab.yml","en-us/blog/welcoming-opencores-to-gitlab",{"_path":8659,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8660,"content":8665,"config":8670,"_id":8672,"_type":14,"title":8673,"_source":16,"_file":8674,"_stem":8675,"_extension":19},"/en-us/blog/what-are-the-benefits-of-a-microservices-architecture",{"title":8661,"description":8662,"ogTitle":8661,"ogDescription":8662,"noIndex":6,"ogImage":7521,"ogUrl":8663,"ogSiteName":670,"ogType":671,"canonicalUrls":8663,"schema":8664},"What are the benefits of a microservices architecture?","On the fence about what a microservices architecture can bring to your team? 
Here's what you need to know.","https://about.gitlab.com/blog/what-are-the-benefits-of-a-microservices-architecture","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What are the benefits of a microservices architecture?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-09-29\",\n      }",{"title":8661,"description":8662,"authors":8666,"heroImage":7521,"date":8667,"body":8668,"category":769,"tags":8669},[745],"2022-09-29","\n[Microservices architecture](/topics/microservices/) is a framework where an application is separated into smaller services and each of those services typically runs a unique process and manages its own database. There are many pros and cons to microservices. Let's explore them.\n\n## Advantages of microservices architecture\n\n### Scalability improvements\n\nSince each microservice runs independently, it is easier to add, remove, update or scale each cloud microservice. Developers can perform these tasks without disrupting any other microservice in the system. Companies can scale each microservice as needed. For instance, if a particular microservice experiences increased demand because of seasonal buying periods, more resources can be efficiently devoted to it. If demand drops as the season changes, the microservice can be scaled back, allowing resources or computing power to be used in other areas.\n\n### Improved fault isolation\n\nUnder a monolithic architecture structure, when developers experience a failure in one element of the architecture, it will collapse all architecture components. With a microservices architecture, if one service fails, it’s much less likely that other parts of the application will fail because each microservice runs independently. 
However, businesses need to be careful, because large volumes of traffic can still be overwhelming in some cases.\n\nThe benefit of a microservice architecture is that developers can deploy features that prevent cascading failures. A variety of tools are also available, from GitLab and others, to build fault-tolerant microservices that help improve the resilience of the infrastructure.\n\n### Program language and technology agnostic\n\nA microservice application can be programmed in _any_ language, so dev teams can choose the best language for the job. The fact that microservices architectures are language agnostic also allows the developers to use their existing skill sets to maximum advantage – no need to learn a new programming language just get the work done.\nUsing cloud-based microservices gives developers another advantage, as they can access an application from any internet-connected device, regardless of its platform.\n\n### Simpler to deploy\n\nA microservices architecture lets teams deploy independent applications without affecting other services in the architecture. This feature, one of the pros of microservices, will enable developers to add new modules without redesigning the system's complete structure. Businesses can efficiently add new features as needed under a microservices architecture.\n\n### Reusability across different areas of business\n\nSome microservice applications may be shareable across a business. If a site has several different areas, each with a login or payment option, the same microservice application can be used in each instance.\n\n### Faster time-to-market\n\nDevelopers can plug this new “microsurgery” into the architecture without fear of conflicts with other code or of creating service outages that ripple across the website. Development teams working on different microservices don't have to wait for each other to finish. 
Companies can develop and deploy new features quickly and upgrade older components as new technologies allow them to evolve.\n\n### Ability to experiment\n\nDeciding to go forward with experimentation is much easier with microservices architecture.\n\nIt’s simple to roll out new features because each service is independent of the others. If customers don't like it, or the business benefits aren’t clear, it's much easier to roll it back without affecting the rest of the operation.\n\nIf a  new feature is a customer request, a microservices architecture means they’ll get to experience it in weeks, rather than months or years.\n\n### Improved data security\n\nIf the components of the computer systems architecture break down into smaller pieces, sensitive data is protected from intrusions from another area. While there are connections between all microservices, developers can use secure APIs to connect the services. Secure APIs safeguard data by ensuring it is only available to specifically authorized users, applications and servers. If a business requires handling sensitive data such as health or financial information, it's easier to achieve compliance under data security standards such as healthcare's [HIPAA](https://www.hhs.gov/hipaa/index.html) or the European [GDPR](https://gdpr-info.eu).\n\n### Outsourcing flexibility\n\nIt may be necessary for a business to outsource certain functions to third-party partners. Many companies are concerned about protecting intellectual property with a monolithic architecture format. However, a microservices architecture allows businesses to segment areas just for  partners that won’t otherwise disclose core services.\n\n### Team optimization\n\nWhen considering the size of teams you assign to each microservice, consider the two-pizza rule. First articulated by Amazon, which pioneered microservices, the idea is to keep development teams small enough to feed them with two pizzas. 
Experts explain that this guideline improves work efficiency, allows businesses to achieve goals faster, makes teams easier to manage, creates greater focus among the group and results in higher quality products.\n\n### Attractive for engineers\n\nEngineers find microservices architecture enticing, and companies have a [better chance of finding top-flight talent](/blog/have-devops-jobs-to-fill-try-these-3-strategies-to-hire-and-retain/) to work on microservices application development. Microservices rely on the latest engineering practices and developer tools. This provides an important advantage for businesses hoping to attract specialists.\n\n## Disadvantages of microservices\n\nWhile there are a solid number of advantages for any business, there are also a few disadvantages of microservices to consider before adoption.\n\n### Upfront costs are higher with microservices\n\nWhile cloud microservices are a pro, such as saving money over the long run, there are cons, such as the costs associated with their initial deployment. A business needs to have sufficient hosting infrastructure with security and maintenance support. Even more important, it will need skilled teams to manage all services.\n\n### Interface control is crucial\n\nSince each microservice has its own API, any application using that service will be affected if you change the API, and that change is not backward compatible. Any large operation using a microservices architecture will have hundreds, even thousands, of APIs so controlling those interfaces becomes critical to the business's operation, which can be a disadvantage to microservices architecture.\n\n### A different kind of complexity\n\nDebugging can be more challenging with a microservices architecture. Each microservice will have its own set of logs. This provides a minor headache when tracing the source of a problem in the code.\n\n### Integration testing\n\nUnit testing is more manageable with microservices architecture. 
Integration testing is not. Since the architecture distributes each microservice, developers cannot test the entire system from their machines.\n\n### Service-oriented architecture vs. microservices\n\nIf you work in cloud computing, you're probably aware of the [service-oriented architecture (SOA)](https://www.techtarget.com/searchapparchitecture/definition/service-oriented-architecture-SOA) versus microservices debate. In many ways, the two architectures are similar as they both involve cloud computing for agile development. Both break large monolithic components into smaller units that are easier to work with.\n\nThe biggest difference is that SOA is an enterprise-wide approach to developing software components. Microservices, meanwhile, build standalone applications that perform a specific function and this cloud-native approach to development and deployment makes them more scalable, agile and resistant. \n\nSo, in essence, the difference between the two comes down to scope. SOA is an enterprise-wide approach, while a microservices architecture has an application scope.\n\nRead on to learn [how to get started with a microservices architecture](/blog/get-started-with-microservices-architecture/).\n",[9,9,916],{"slug":8671,"featured":91,"template":686},"what-are-the-benefits-of-a-microservices-architecture","content:en-us:blog:what-are-the-benefits-of-a-microservices-architecture.yml","What Are The Benefits Of A Microservices 
Architecture","en-us/blog/what-are-the-benefits-of-a-microservices-architecture.yml","en-us/blog/what-are-the-benefits-of-a-microservices-architecture",{"_path":8677,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8678,"content":8683,"config":8688,"_id":8690,"_type":14,"title":8691,"_source":16,"_file":8692,"_stem":8693,"_extension":19},"/en-us/blog/what-blocks-faster-code-release",{"title":8679,"description":8680,"ogTitle":8679,"ogDescription":8680,"noIndex":6,"ogImage":1801,"ogUrl":8681,"ogSiteName":670,"ogType":671,"canonicalUrls":8681,"schema":8682},"What blocks faster code releases? It starts with testing","Our 2020 DevSecOps Survey found testing was the number one reason for release delays, but planning and code reviews were also challenges. Here’s what you need to know.","https://about.gitlab.com/blog/what-blocks-faster-code-release","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What blocks faster code releases? It starts with testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-05-29\",\n      }",{"title":8679,"description":8680,"authors":8684,"heroImage":1801,"date":8685,"body":8686,"category":679,"tags":8687},[851],"2020-05-29","\nOur [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals.\n\nFirst, the good news: do DevOps right and you’ll release code faster. In fact, 83% of our [2020 Global DevSecOps Survey](/developer-survey/) respondents said code heads out the door more quickly thanks to a successful DevOps practice.\n\nBut we also asked survey takers what was most likely to delay their code, and their responses highlighted some of the toughest challenges DevOps practitioners face. 
When it comes to delays, 47% said testing was the culprit, while 39% said planning, and 28% said code review.\n\nAt a time when faster software releases are perhaps even more critical than ever before, it may be helpful for your organization to take a hard look at what blocked our 3652 respondents from 21 countries across 19 job categories. Test, planning, and code reviews are essential steps in DevOps, but as our survey responses show, they can easily turn into black holes of time and frustration.\n\n## The trouble with testing\n\nLet’s just say it: Testing is hard. A key component of successful DevOps, testing is apparently the hill many teams die on – repeatedly. In [our 2019 survey](/blog/global-developer-report/) 49% of all respondents pointed their fingers squarely at test as the primary cause of delays, and it’s discouraging that the percentage was only slightly smaller this year.\n\n_\"We are slow and do not test very well. We do Big Bang deployments.\"_\n\nThe trouble with testing boils down to essentially two issues: there are never enough tests done and automating testing is tricky. We asked developers to assess their tasks and to tell us what they should be doing but are not. The vast majority of them said they weren’t doing enough testing, period.\n\n_\"Not enough tests (or none) and then the code doesn’t work in production. Some collaborators have poor IT skills.\"_\n\n_\"Too little testing done too late.\"_\n\n_\"We need more test cases to cover 100% of everything.\"_\n\nJust 12% of survey takers told us their teams had full test automation and about 25% said they either have nothing set up or are only beginning the automation journey.\n\nThere are a few glimmers of hope. For starters, teams that have cracked the test automation code told us about the concrete benefits.\n\n_\"We do [TDD (test driven development)](https://www.agilealliance.org/glossary/tdd/). QA and dev act as a team. 
We have automated tests running parallel with developing code.\"_\n\nAnd 16% of survey respondents either have a \"bot\" reviewing their code or have an AI/ML tool in place for testing. It's early days for AI-powered testing, clearly, but the results are intriguing.\n\n## The truth about planning\n\nTesting may be technically challenging but planning is also a significant stretch for many development teams. A developer shared that work happened \"without much planning\" and that was a common refrain.\n\nOne reason for a perceived or real lack of planning could lie in the fact that many software teams use hybrid development methodologies, each of which have their own (not necessarily compatible) planning practices. Feedback from survey takers seemed to support this.\n\n_\"Planning is in the form of some teams doing waterfall and others doing 'wagile.''\"_\n\n_\"Planning is somewhat heavyweight and a little less than agile.\"_\n\nFor many of our survey takers, there was just overall frustration with the planning process.\n\n_\"Poor planning leads to a lot of doubling back.\"_\n\n## The paradox of code reviews\n\nThere is no question code reviews are critical to DevOps success. Almost 50% of all our respondents conduct them weekly and a significant percentage do them twice a week or even daily. But they’re also a source of frustration when it comes to getting code out the door quickly. Code reviews at some companies can require too many people, or not enough people, or too much \"paperwork.\"\n\n_\"We have a strict code review process and it often takes several days for the reviewer to respond to requests for review.\"_\n\n_\"Code review takes time and every developer has to explain how he achieved what he did.\"_\n\n_\"Code reviews can take a long time due to the lack of reviewers.\"_\n\nCode reviews are cumbersome but 95% of our respondents said they’re either very or moderately valuable for ensuring code quality and security. 
The trick is to just figure out how to streamline them, and one survey taker offered his organization’s strategy: \"Each merge request is code reviewed by a peer; there is no team code review.\"\n\nOur [2022 Global DevSecOps Survey](/developer-survey/) has the latest insights from over 5,000 DevOps professionals. You can also compare it with [previous year surveys](/developer-survey/previous/)\n",[771,681,9],{"slug":8689,"featured":6,"template":686},"what-blocks-faster-code-release","content:en-us:blog:what-blocks-faster-code-release.yml","What Blocks Faster Code Release","en-us/blog/what-blocks-faster-code-release.yml","en-us/blog/what-blocks-faster-code-release",{"_path":8695,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8696,"content":8702,"config":8707,"_id":8709,"_type":14,"title":8710,"_source":16,"_file":8711,"_stem":8712,"_extension":19},"/en-us/blog/what-south-africa-taught-me-about-cybersecurity",{"title":8697,"description":8698,"ogTitle":8697,"ogDescription":8698,"noIndex":6,"ogImage":8699,"ogUrl":8700,"ogSiteName":670,"ogType":671,"canonicalUrls":8700,"schema":8701},"What our summit in South Africa taught me about cybersecurity","Cybersecurity is a necessity, but it's often treated as an afterthought. 
What it has in common with modern photography could tell us how to make it less painful to achieve.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671140/Blog/Hero%20Images/south-africa-cyber-security.jpg","https://about.gitlab.com/blog/what-south-africa-taught-me-about-cybersecurity","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What our summit in South Africa taught me about cybersecurity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2018-09-11\",\n      }",{"title":8697,"description":8698,"authors":8703,"heroImage":8699,"date":8704,"body":8705,"category":679,"tags":8706},[1921],"2018-09-11","\nThe GitLab team [summit](/events/gitlab-contribute/) recently took place in Cape Town, South Africa, which, as you can imagine, promised to be memorable.\nWhen preparing to cross three continents over 22 hours on airplanes, you think carefully about what to pack. You are anticipating the most beautiful scenery ever and want to make sure you capture it in pictures. So you find your camera – the one you haven't used in a long time because you've grown accustomed to using your cellphone. After careful debate, you decide to take it because those awesome experiences and scenery deserve the best camera.\n\nThe camera requires certain things: You have to make sure it's charged or has batteries; it needs to have adequate storage; you may need additional lenses which also require special care. Everything must be protected and carefully packed, and requires additional space and weight to carry on board with you because it's too precious to put in checked baggage.\n\nWhen you get to South Africa and you see this incredible scenery, you take out your wonderful camera and you realize a few things:\n\n## 1. 
You have only a precious few moments to capture the image\n\nDo I really have time to fidget with f-stops and customizations, or do I just want to capture the picture and perhaps customize it a bit later by cropping and adjusting the light?\n\n## 2. It's difficult to share camera photos immediately\n\nI'm anxious to share these images with friends and family back home. It occurs to me that with my phone I can share images immediately and effortlessly by email, text, Slack, or a variety of social media. If I take the pictures on my expensive camera, I can't share them immediately because it's not connected to anything. I'll have to wait until I get back to my hotel room so I can take the flash drive and put it in my laptop, log on to the Wi-Fi, and then share my images.\n\n## 3. My camera photos aren't secure\n\nIf I lose that flash drive, all of my images are gone (unless I back them up immediately after capturing them – not likely!). While it is possible I could lose my cell phone and lose my pictures, it's less likely. My phone is an integral part of my daily workflow – an appendage even. How often do we feel naked if we forget our phone at home or even in the other room? I'm much less likely to leave my phone on the bus when I get off to explore then to leave my camera behind.\n\nSo, I choose to use my phone to capture these magnificent images. My primary objective is not taking fabulous pictures worthy of publication that I can sell or frame on my wall, but to take pictures that are good enough, that capture the special place, and that I can share with friends and family easily and effortlessly. If it's too hard to share, I may not do it, or it may take me a long time. In addition, I don't have to think ahead about how my phone will capture an image in such a way to send it to friends; the images automatically integrate with all of the other sharing mechanisms on my phone. It simply works. 
I am free to focus on my primary effort of capturing the images while I soak up the moments.\n\n## Now, how does this relate to cybersecurity?\n\nCompanies invest a great deal in [application security to test their software](/topics/devsecops/) for security vulnerabilities. It's a separate application that requires its own budget and maintenance. Like the specialized camera, the information it creates must be shared in order to be most useful. The security team can use it by itself, but to be truly effective, the vulnerabilities found must be shared with development so that they can be corrected. Yet developers have little interest in logging onto a security system to access the data. Would your friends and family want to physically turn on your camera to look at your pictures? Maybe, but it's very limiting as to whom you can reach.\n\nThe challenge then is how do you get the data found by the application security system into the hands of the developers? Today that is one of the greatest challenges to overcome, even in rare cases where the objectives of security and dev totally align.\n\n### What if you looked at application security the same way we look at photographing images?\n\nIs the prime objective to do the most eloquent job of finding the vulnerabilities? Or, is the prime objective to get the vulnerabilities that we do find fixed? If it is the latter then the primary issue must be integrating with the developers’ workflow.\n\nWith [GitLab application security testing](/solutions/security-compliance/), it is like the camera on your phone – maybe not superior to a dedicated tool in isolation, but good quality, and more importantly, integrated into the workflow to be the most useful. It is easily and efficiently used without added thought. With GitLab, every commit and every merge request is tested. 
There isn't even a separate step – it's all automated for you without additional effort.\n\n![GitLab security dashboard](https://about.gitlab.com/images/blogimages/security-dashboard.png){: .shadow.medium.center}\n\nAs with photography, the most important thing is that you capture those moments before they escape you; with application security testing, it's important that you capture those vulnerabilities so that you can act upon them. With GitLab, the vulnerabilities are shown right there in the developers' workflow. They don't have to log into a different system nor interrupt their work. The security vulnerabilities are shown right alongside any other application flaws in the pipeline results of each merge request. The developer can choose to fix them now or continue the build, but either way, the vulnerabilities are captured and logged. And now with the security dashboard, the security team can evaluate further and create an issue for remediation if needed.\n\n>The vulnerabilities are shown right there in the developers' workflow. They don't have to log into a different system nor interrupt their work\n\nThis really does turn application security on its head! It puts the insight and tools for action into the hands of the developer and then shares results with security, rather than the other way around. It makes so much more sense because the developer must do the remediation, not the security pro. Imagine the efficiency gains if most of the effort was placed on eliminating the vulnerabilities up front, rather than on finding and tracking them later in the SDLC! Sound familiar? This has been imagined before and cost savings even estimated. It's the \"shift left\" mantra. While everyone embraces it, few actually achieve it. Why? Because they lack the tools to enable such a seismic shift where the only gate is the merge request.\n\nAlbert Einstein said that the definition of insanity was doing the same thing and expecting a different result. 
So how can we expect traditional application security methods to meet the needs of modern, cloud-first DevOps environments? We can't. With GitLab, our single application helps users efficiently develop and deploy secure code by leveraging the power of integration across the entire SDLC. [No more stitching together complex DevOps tool chains](/). Microsoft did something similar years ago. Remember Word Perfect? It succumbed to Word because content could be copied/pasted and integrated across the Microsoft suite of documents, spreadsheets and slides. GitLab is on track to do the same thing for software development – including application security testing.\n\n_What do you think? Is this a new era of app security?_\n\nPhoto by [Clyde Thoma](https://unsplash.com/photos/8plz1xK_Wmk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyTexts) on [Unsplash](https://unsplash.com/search/photos/cape-town?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[875,9,1829],{"slug":8708,"featured":6,"template":686},"what-south-africa-taught-me-about-cybersecurity","content:en-us:blog:what-south-africa-taught-me-about-cybersecurity.yml","What South Africa Taught Me About Cybersecurity","en-us/blog/what-south-africa-taught-me-about-cybersecurity.yml","en-us/blog/what-south-africa-taught-me-about-cybersecurity",{"_path":8714,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8715,"content":8721,"config":8726,"_id":8728,"_type":14,"title":8729,"_source":16,"_file":8730,"_stem":8731,"_extension":19},"/en-us/blog/what-the-solarwinds-attack-can-teach-us-about-devsecops",{"title":8716,"description":8717,"ogTitle":8716,"ogDescription":8717,"noIndex":6,"ogImage":8718,"ogUrl":8719,"ogSiteName":670,"ogType":671,"canonicalUrls":8719,"schema":8720},"How DevSecOps can protect businesses from future supply chain attacks","Learn how GitLab's all-in-one DevSecOps solution can help businesses keep their supply chains 
secure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669522/Blog/Hero%20Images/solarpanels.jpg","https://about.gitlab.com/blog/what-the-solarwinds-attack-can-teach-us-about-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How DevSecOps can protect businesses from future supply chain attacks\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pedro Fortuna\"},{\"@type\":\"Person\",\"name\":\"Sam Kerr\"}],\n        \"datePublished\": \"2021-08-18\",\n      }",{"title":8716,"description":8717,"authors":8722,"heroImage":8718,"date":8723,"body":8724,"category":875,"tags":8725},[5250,5251],"2021-08-18","\n\nOne of the cybersecurity keywords for 2021 will undoubtedly be \"software supply chain attacks\". For decades, we've seen a global move toward connected systems and highly complex supply chains. Today these supply chains are under attack, with malicious actors jeopardizing the sensitive data of millions of users through attacks on the public and private sectors.\n\nAfter public and federal entities were targeted in some high-profile supply chain attacks, the United States government released an [executive order](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/) with plans to improve the security posture of the nation when it comes to software supply chains. 
The UK issued a similar [call for views on cybersecurity in supply chains](https://www.gov.uk/government/publications/call-for-views-on-supply-chain-cyber-security/call-for-views-on-cyber-security-in-supply-chains-and-managed-service-providers) around the same time.\n\nSupply chain security became a global priority after the SolarWinds attack, reminding businesses and institutions of the extensive damages caused by these vulnerabilities.\n\n## A brief summary of the SolarWinds incident\n\nBetween April and June 2020, malicious actors exploited vulnerabilities in the Solarwinds Orion Platform and inserted a backdoor – allowing attackers to deploy multiple payloads like spyware, and leak confidential data from companies that used the platform, including some U.S. federal entities.\n\n[Current estimates are 18,000 organizations](https://www.zdnet.com/article/sec-filings-solarwinds-says-18000-customers-are-impacted-by-recent-hack/) were impacted by the [SolarWinds attack](/blog/devops-platform-supply-chain-attacks/), but there's a lot we still don't know about the magnitude of the attack. One thing we know for certain is the number of successful supply chain attacks is increasing, and businesses need more effective security measures to protect their software supply chain.\n\n[Web-based supply chain attacks](https://jscrambler.com/resources/white-papers/supply-chain-attacks?utm_source=about.gitlab.com&utm_medium=referral&utm_campaign=key-lessons-solarwinds) is a particularly concerning vector of attack. The number of web-based attacks have grown over the past few years and provide attackers with a lower barrier to entry when it comes to getting their hands on valuable user data.\n\n## An emerging attack vector: Web supply chain attacks\n\nToday, the average website runs [35 distinct third-party scripts](https://www.reflectiz.com/blog/looking-at-the-figures-of-third-party-application-security-on-websites-part-1/). 
Plus, it's estimated that only 3% of the source code of the actual website is written by the team developing the website, while the remaining 97% comes from third-party libraries used during development. Oftentimes, several pieces of third-party code will be coming from companies or individuals with fewer resources dedicated to security, which puts the typical website on precarious footing with an extremely high level of exposure to third-party risk.\n\nThe dependence on third-party code creates a significant opportunity for attackers by allowing them to breach a third-party code supplier and inject a malicious payload into the source code of the third-party script. The compromised source code will then make its way down the web supply chain, reaching hundreds or thousands of different websites.\n\nHere's where things get even more complicated. In the context of the web, every website script has the same privileges, whether it is a first or third party. As such, a compromised third-party script will be able to harvest any user input, add extra code, hijack events, and fully modify the behavior of the web page. As a result, web supply chain attacks are now being used to leak sensitive user data, such as user credentials, credit card numbers, and other types of PII/PHI that are then sold on underground marketplaces.\n\nA prime example of a web supply chain attack occurred in April 2021, when Codecov, a popular code coverage tool, was breached. The attackers modified the source code of the tool and leaked sensitive data, including dev credentials, tokens, and keys. 
At the time, more than 29,000 companies were potentially exposed to the attack and some companies reported being breached by [Magecart web skimmers](https://www.bleepingcomputer.com/news/security/e-commerce-giant-suffers-major-data-breach-in-codecov-incident/) or [having their source code exposed to attackers](https://www.bleepingcomputer.com/news/security/codecov-hackers-gained-access-to-mondaycom-source-code/) in the weeks that followed.\n\n## Web supply chain security from within DevSecOps\n\n[DevSecOps](/topics/devsecops/) is a key resource in the global push toward more secure supply chains.\n\nThe whole premise of DevSecOps is to ingrain security controls throughout the entire software development lifecycle. Companies must adopt a multi-layered, defense-in-depth posture to reduce the risk of web supply chain attacks, which is ideally integrated into their DevSecOps workflow. Adopting DevSecOps practices will provide businesses with much-needed **visibility** and **control** over their website supply chain.\n\nThe [GitLab DevOps platform](/solutions/devops-platform/) provides the necessary layers of protection for improved web supply chain security in a single application.\n\nFirst, GitLab automates the process of **scanning the application** using [several tools](/stages-devops-lifecycle/secure/) and techniques, such as SAST, DAST, dependency, container scanning, secrets detection, and fuzz testing (including API fuzzing). This robust scanning increases visibility over potentially insecure third-party code, while also giving full visibility into all code changes before they are pushed to the main branch.\n\nWhile vulnerability scanning is an important step to minimize exposure to web supply chain attacks, the source code of the application is still exposed at the client-side and can be reverse-engineered or tampered with by attackers during the recon stage of the attack. 
To address this risk, GitLab provides **source code protection** through an [integration with Jscrambler](/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler/). [Jscrambler](https://jscrambler.com/?utm_source=about.gitlab.com&utm_medium=referral&utm_campaign=key-lessons-solarwinds) adds key security layers such as obfuscation, code locks, and runtime protection, which thwart static and dynamic code analysis and locks out attackers.\n\nGitLab's integration with Jscrambler also provides access to additional security layers that bring the required **visibility** and **control** over web supply chain attacks at runtime. One of these key layers is an [inventory of all the scripts running on the website](https://jscrambler.com/free-website-inventory-report?utm_source=about.gitlab.com&utm_medium=referral&utm_campaign=key-lessons-solarwinds) and network requests, providing real-time alerts whenever malicious behavior is detected at the client-side. When coupled with **Jscrambler's powerful rules engine**, GitLab provides a [zero-trust](/blog/tags.html#zero-trust) approach to website security, blocking any malicious behavior originating from third-party code.\n\nFinally, being a true end-to-end DevOps platform, GitLab has built-in security features that simplify the process of continuous iteration. This is key for any defense-in-depth strategy: Providing enough simplicity to enable security within any organization.\n\n## Supply chain security becomes new global priority\n\nThere is no question that the SolarWinds supply chain attack is one for the ages, prompting a necessary global push for improved supply chain cybersecurity and highlighted the importance of protecting the web supply chain.\n\nReducing exposure to web supply chain attacks requires a defense-in-depth approach that should be built into companies' DevSecOps workflows. 
GitLab's end-to-end DevOps platform provides multiple layers of security to address this risk, namely through integration partners such as Jscrambler.\n\nAs we see more companies try to improve their security posture by using the right tools to mitigate web supply chain attacks, I'm confident that they will soon outpace attackers and succeed in keeping billions of users safe.\n\n_Pedro Fortuna is the founder of [Jscrambler](https://jscrambler.com/?utm_source=about.gitlab.com&utm_medium=referral&utm_campaign=key-lessons-solarwinds)._\n\n[Cover image](https://unsplash.com/photos/d7FbDJkJSFw) by [Markus Spiske](https://unsplash.com/@markusspiske?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash\n{: .note}\n",[9,875,231],{"slug":8727,"featured":6,"template":686},"what-the-solarwinds-attack-can-teach-us-about-devsecops","content:en-us:blog:what-the-solarwinds-attack-can-teach-us-about-devsecops.yml","What The Solarwinds Attack Can Teach Us About Devsecops","en-us/blog/what-the-solarwinds-attack-can-teach-us-about-devsecops.yml","en-us/blog/what-the-solarwinds-attack-can-teach-us-about-devsecops",{"_path":8733,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8734,"content":8739,"config":8744,"_id":8746,"_type":14,"title":8747,"_source":16,"_file":8748,"_stem":8749,"_extension":19},"/en-us/blog/what-to-expect-at-predict-2019",{"title":8735,"description":8736,"ogTitle":8735,"ogDescription":8736,"noIndex":6,"ogImage":4427,"ogUrl":8737,"ogSiteName":670,"ogType":671,"canonicalUrls":8737,"schema":8738},"2019 cloud native predictions from the Predict 2019 Conference","Break out your sunglasses, because the cloud native forecast for 2019 is sunny.","https://about.gitlab.com/blog/what-to-expect-at-predict-2019","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"2019 cloud native predictions from the Predict 2019 Conference\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Tina Sturgis\"}],\n        \"datePublished\": \"2018-12-12\",\n      }",{"title":8735,"description":8736,"authors":8740,"heroImage":4427,"date":8741,"body":8742,"category":679,"tags":8743},[3326],"2018-12-12","\n\nGet the latest 2019 predictions from GitLab and other industry experts. [Sign me up](https://predict2019.com/#join-us)!\n{: .alert .alert-info}\n\nI love this time of year!  But it isn't for the reasons you may be thinking ... it's not the holiday decorations, shopping for gifts for loved ones ... it is about PREDICTIONS! Yep, I am a prediction junkie! I love to stop, do a little research as the end of December rolls around, reflect on what happened in that year, and begin to forecast trends I believe will emerge in the new year.\n\nThis year, one of the most exciting areas I wanted to dive into a prediction of is [cloud native](/topics/cloud-native/). It is no longer just a ‘fad,’ enterprises are realizing benefits from adopting cloud native. 
So I got together with my closest GitLab team-members and we dove in to provide you with our top five predictions.\n\n## Top predictions around cloud native\n\nThe basis for cloud native applications to flourish has been set and we believe that 2019 will be a great cloud native year.\n\n* Enterprises will adopt a [multi-cloud strategy](https://medium.com/gitlab-magazine/multi-cloud-maturity-model-2de185c01dd7) for their long-term investments.\n* The cloud native stack is maturing with tools like Kubernetes, Prometheus, and Envoy.\n* We are going to see a lot more on [serverless](/topics/serverless/) with the likes of Lambda and Knative.\n* We will see some real movement in the application of artificial intelligence and machine learning.\n\n## What about DevOps and security predictions?\n\nOnce we completed our research and position on cloud native predictions, we teamed up with [DevOps.com](https://www.devops.com) to participate in their on-demand virtual conference, [Predict 2019](https://predict2019.com/#join-us), that includes predictions around cloud security, DevOps, and quality testing with a [cast of speakers](https://predict2019.com/#speakers) that will educate and inspire you as you move into 2019!\n\n[Sign up now to attend Predict 2019](https://predict2019.com/#join-us)!\n{: .alert .alert-info}\n\nPhoto by [Marc Wieland](https://unsplash.com/photos/zrj-TPjcRLA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/clouds?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,1477,1041,277],{"slug":8745,"featured":6,"template":686},"what-to-expect-at-predict-2019","content:en-us:blog:what-to-expect-at-predict-2019.yml","What To Expect At Predict 
2019","en-us/blog/what-to-expect-at-predict-2019.yml","en-us/blog/what-to-expect-at-predict-2019",{"_path":8751,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8752,"content":8757,"config":8762,"_id":8764,"_type":14,"title":8765,"_source":16,"_file":8766,"_stem":8767,"_extension":19},"/en-us/blog/what-will-devops-do-for-your-team-in-2022",{"title":8753,"description":8754,"ogTitle":8753,"ogDescription":8754,"noIndex":6,"ogImage":4825,"ogUrl":8755,"ogSiteName":670,"ogType":671,"canonicalUrls":8755,"schema":8756},"What will DevOps do for your team in 2022?","DevOps brings the technical wins but business is winning too, thanks to this modern software development strategy. Here's what our latest DevOps assessment found.","https://about.gitlab.com/blog/what-will-devops-do-for-your-team-in-2022","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What will DevOps do for your team in 2022?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-01-19\",\n      }",{"title":8753,"description":8754,"authors":8758,"heroImage":4825,"date":8759,"body":8760,"category":769,"tags":8761},[851],"2022-01-19","\n\nOver the last six months, we’ve asked teams and individual contributors to assess their DevOps platform practices by answering a 20-question quiz. To date, more than 600 people have shared their experiences, providing a clear, and somewhat surprising, snapshot of DevOps as it’s done _today_. There are obvious technical wins, of course, but there are also glimpses of how DevOps and modern software development are driving business change. \n\nHere are some of the key takeaways:\n\n### DevOps is a stand up (and out) choice\t\n\nAlmost 35% of respondents say they’ve been doing DevOps for between one and three years, while 22% report they’ve been at DevOps less than a year. 
And 16% are in that DevOps sweet spot of between three and five years, while 15% are seasoned DevOps pros with more than five years of experience. \n\nDevOps, of course, enables faster and safer software development and it’s clearly taking teams and entire organizations along for the ride, with much greater levels of collaboration/planning and a commitment to cross-functional processes. Nearly one-quarter of respondents say everyone in their organization considers themselves to be part of the DevOps team. And 17% say security, test, and design have joined dev and ops to create their DevOps teams. \n\nBig changes are happening within those teams as well. Just shy of 30% say the traditional roles of “dev” and “ops” are definitely blurring and 16% report everyone on their team is “cross-functional”. Nearly 15% say dev, sec, ops, and test are all seeing roles change and blend together.\n\nWhen asked how teams handle planning and collaboration, 50% say their processes were either “long-established and effective” or “completely seamless and baked into everything.” Meanwhile, 43% are either just starting a planning/collaboration process or are well underway. \n\nTo put it another way, it appears DevOps drives faster releases *and* better planning and collaboration almost in equal measure. \n\n### A DevOps platform in 2022\n\nJust shy of 36% of quiz takers use an “out of the box” [DevOps platform](/solutions/devops-platform/), while only 7% are considering one. 
Nearly one-third of respondents say their DevOps platform is a “hybrid” affair of homegrown and purchased solutions, or what GitLab refers to as [DIY DevOps](/blog/welcome-to-the-devops-platform-era/#phase-3",[9,749,267],{"slug":8763,"featured":6,"template":686},"what-will-devops-do-for-your-team-in-2022","content:en-us:blog:what-will-devops-do-for-your-team-in-2022.yml","What Will Devops Do For Your Team In 2022","en-us/blog/what-will-devops-do-for-your-team-in-2022.yml","en-us/blog/what-will-devops-do-for-your-team-in-2022",{"_path":8769,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8770,"content":8776,"config":8780,"_id":8782,"_type":14,"title":8783,"_source":16,"_file":8784,"_stem":8785,"_extension":19},"/en-us/blog/what-you-need-to-know-about-devops-audits",{"title":8771,"description":8772,"ogTitle":8771,"ogDescription":8772,"noIndex":6,"ogImage":8773,"ogUrl":8774,"ogSiteName":670,"ogType":671,"canonicalUrls":8774,"schema":8775},"What you need to know about DevOps audits","DevOps’s many steps can streamline the audit process. Here’s how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668339/Blog/Hero%20Images/a-tale-of-two-editors.jpg","https://about.gitlab.com/blog/what-you-need-to-know-about-devops-audits","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What you need to know about DevOps audits\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-08-31\",\n      }",{"title":8771,"description":8772,"authors":8777,"heroImage":8773,"date":7713,"body":8778,"category":769,"tags":8779},[851],"\nWhile presumably no one likes an audit, DevOps teams do have some built-in advantages when it comes to intense levels of internal and external scrutiny. 
Here’s a quick look at DevOps audits, why they matter, and how teams can set themselves up for audit success.\n\n## Looking under the hood\n\nIn most organizations, there are two types of audits: internal and external. At their most simplistic, internal audits are conducted by people within the existing organization, while external audits are conducted by third parties. Either way, audits look to ensure an organization is compliant, and that’s where things can get a bit complicated.\n\nBeing “compliant” can mean an organization is meeting standards set by the government (like [NIST frameworks](/blog/comply-with-nist-secure-supply-chain-framework-with-gitlab/) or HIPAA regulations), living up to its own governance rules regarding data, security policies and processes, and more, or it can mean some combination of the two. Also, depending on the type of organization and its vertical industry, compliance can have wildly different requirements.\n\nIn the end, it comes down to [being compliant](/blog/the-importance-of-compliance-in-devops/) means keeping track of any data and processes that can prove compliance is happening, and that’s what auditors need to be able to easily access.\n\nObviously, it’s a big job. Way back when, external auditors would literally set up shop in an empty office and spend weeks (or months) sifting through written records, interviewing employees, and even walking the factory floor if necessary. Today, technology, especially automation, have made audits easier to prepare for and carry out, but the plethora of standards bodies and [a growing focus on security risks](/blog/the-ultimate-guide-to-software-supply-chain-security/) mean more time spent auditing than ever before.\n\n## Enter DevOps\n\nThe largely seamless nature of DevOps not only makes it easier to get software out the door more quickly but it also streamlines the audit process. Why? 
Because automation tracks every step that happens, creating an auditable record, and the “continuous” nature of DevOps also naturally supports the idea of “continuous” or more frequent (and thus easier) audits.\n\n“DevOps is all about building: writing code, building code, testing code, and compiling it,” says [Sam White](/company/team/#sam.white), GitLab’s principal product manager, Protect. “And it's about getting that code built into a deliverable that's actually shipped out to the end user and runs in production. Compliance [in this sense] is all about what regulatory controls and processes have to be followed within the context of writing, building, and shipping software.”\n\n## Audits and DevOps\n\nDevOps processes naturally lend themselves to audits, White explains, because each of the steps can be traced and many, like merge requests, require signoffs. “Compliance regulations can vary across industries and geography. But, generally, what I hear from compliance teams is they need to make sure all of their commits are signed. You want to make sure you don't have a malicious actor putting in bad code. So finding the commits helps you verify who the person was who wrote the code,” he says.\n\nCode review is another obviously “auditable” step in the process, he says, because “it’s very common for organizations to require at least two people to review any code before it gets merged in.” Auditors want to follow the path and DevOps makes it simpler to look at the flow of commits/MRs and code reviews to make sure nothing untoward has happened.\n\n## Track everything\n\nWhile DevOps audit checklists [do exist](https://itrevolution.com/devops-audit-defense-toolkit/), industry compliance requirements vary so widely that a generic list is really only a starting point. 
But there are basic steps DevOps teams should follow:\n\n- Ensure all code commits have signoffs.\n- Review code on a regular cadence and require at least two signatures.\n- Logging tools are critical – are they widely used and is the data easy to access?\n- Make sure everyone on the team understands the concept of compliance as it relates to a particular industry.\n- Acknowledge that developers aren’t auditors 😀.\n- Check in on operations pros, who are increasingly being tasked with compliance but also report [suffering from information overload](/developer-survey/).\n\nLearn about GitLab’s vision for [compliance management](/direction/govern/compliance/compliance-management/).\n\n_Lauren Minning contributed to this blog post._\n",[9,681,875],{"slug":8781,"featured":6,"template":686},"what-you-need-to-know-about-devops-audits","content:en-us:blog:what-you-need-to-know-about-devops-audits.yml","What You Need To Know About Devops Audits","en-us/blog/what-you-need-to-know-about-devops-audits.yml","en-us/blog/what-you-need-to-know-about-devops-audits",{"_path":8787,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8788,"content":8794,"config":8798,"_id":8800,"_type":14,"title":8801,"_source":16,"_file":8802,"_stem":8803,"_extension":19},"/en-us/blog/whats-next-for-devsecops",{"title":8789,"description":8790,"ogTitle":8789,"ogDescription":8790,"noIndex":6,"ogImage":8791,"ogUrl":8792,"ogSiteName":670,"ogType":671,"canonicalUrls":8792,"schema":8793},"GitLab’s 2023 predictions: What’s next for DevSecOps?","Check out insights on securing the supply chain, new uses for AI/ML, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663820/Blog/Hero%20Images/prediction.jpg","https://about.gitlab.com/blog/whats-next-for-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s 2023 predictions: What’s next for DevSecOps?\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2023-01-26\",\n      }",{"title":8789,"description":8790,"authors":8795,"heroImage":8791,"date":2999,"body":8796,"category":769,"tags":8797},[1454],"\nIn 2023, organizations will focus their time and resources on the continued shift left of security, completing the evolution from DevOps to [DevSecOps](/topics/devsecops/). GitLab Chief Marketing and Strategy Officer [Ashley Kramer](https://gitlab.com/akramer) says that every company will need to have security tightly integrated into DevOps to combat the increased threats throughout the software development lifecycle. In addition, DevSecOps teams will have to continue to focus on supply chain security, make optimal use of artificial intelligence and machine learning, and expand their use of value stream analytics. GitLab leaders from across disciplines share these predictions and more about how the industry will change this year.\n\n## Prediction 1: Protecting the supply chain will be the top priority\n\nSecurity will continue to be an organization-wide responsibility, shifting further left and spanning from [the IDE](/blog/get-ready-for-new-gitlab-web-ide/) to applications running in production, according to  [David DeSanto](https://gitlab.com/david), Chief Product Officer.\n\nIn our [2022 Global DevSecOps survey](https://about.gitlab.com/developer-survey/previous/2022/), 57% of sec team members said their orgs have either shifted security left or are planning to this year. Half of security professionals report that developers are failing to identify security issues – to the tune of 75% of vulnerabilities.\n\nThe shift left will be driven in part by the need for [tighter security for software supply chains](/blog/the-ultimate-guide-to-software-supply-chain-security/). 
“As remote development becomes more and more commonplace, software supply chain security will play a more expansive role across the software development lifecycle,” DeSanto says.\n\n[Francis Ofungwu](https://gitlab.com/fofungwu), Global Field CISO, predicts this supply chain security evolution will happen in three key ways:\n\n- The engineering frontlines will take on more ownership of managing threats in their day-to-day operations. In order to accomplish this, developers will need real-time context on vulnerabilities and remediation strategies in each phase of the software development lifecycle (SDLC), consequently reducing the likelihood of painful incidents in production environments.\n\n- Security and compliance teams will invest in transcribing their software assurance expectations into policy-as-code to reduce the manual and time-consuming security review processes that reduce development velocity.\n\n- As a result of headline-grabbing incidents highlighting enterprise risks in modern software development, organizations will build audit programs to better assess and report SDLC risks. This will require organizations to design how to deliver artifacts that prove the immutability of the controls deployed across all aspects of their development toolchain. \n\nOrganizations should also expect that “what have been best practices for supply chain security for many years, will now become regulatory requirements,” says [Corey Oas](https://gitlab.com/corey-oas), Manager, Security Compliance (Dedicated Markets). He points to [artifact attestation and software bill of materials (SBOM) generation](/blog/the-ultimate-guide-to-sboms/) as examples of best practices that will soon become federal government or industry mandates. 
“Both of these are integral to developer workflows.” \n\n[Sam White](https://gitlab.com/sam.white), Group Manager, Product - Govern, doubles down on the SBOM and artifact attestation prediction, saying both SBOMs and attestations will need ongoing attention from DevSecOps teams. “Expect to see a shift from looking at these as one-time events to them becoming part of a continuous evaluation process,” he says, adding that organizations will need deeper visibility into software dependencies (e.g. open source packages) and more centralization of software build information.\n\nAnother element of software supply chain security is [zero trust](/blog/why-devops-and-zero-trust-go-together/). “Organizations have considered zero trust strategies for a while, and it will be an implementation focus for them going forward,” predicts [Joel Krooswyk](https://gitlab.com/jkrooswyk), GitLab Federal CTO. “One reason for this movement, at least among federal agencies and their suppliers, is the recent release of the Department of Defense zero trust architecture strategy and roadmap and the inclusion of zero trust principles in several National Institute of Standards and Technology publications such as [800-207](https://csrc.nist.gov/publications/detail/sp/800-207/final).”\n\n> Get more public sector predictions with our webcast [“2022 Lookback & 2023 Predictions in Cybersecurity & Zero Trust with GitLab”](https://page.gitlab.com/2022_devsecopsusecase_Lookback_Predictions_PubSec_RegistrationPage.html)\n\n## Prediction 2: Security will burrow deep into DevOps education\n\nTo mirror the transformation of DevOps to DevSecOps, [DevOps training and education](/blog/5-ways-to-bring-devops-to-your-campus/) will include security as a key part of the curricula, White says. 
“Organizations will have to provide access to the training that developers need to get a baseline security knowledge, including why certain vulnerabilities are important and should be addressed right away,” he says.\n\n[Pj Metz](https://gitlab.com/PjMetz), Education Evangelist, believes 2023 will be the year that “Shift Left principles will show up in university classrooms.”\n\n“Already, the GitLab for Education team has seen more and more requests for information on DevSecOps, and not just in computer science and programming. Information systems students are looking to learn more about DevSecOps as well,” he says. ”Integrating security education directly into DevOps curricula will ensure that future professionals will be prepared for all aspects of DevSecOps.”\n\nAnd he encourages DevOps students to [ask for security to be added into their education](https://about.gitlab.com/the-source/security/the-future-of-devops-education-needs-to-include-security/) so they will be properly prepared for the workforce. \n\n## Prediction 3: AI/ML will be used throughout the SDLC\n\n“AI will become essential for productivity,” Kramer says. “For example, DevOps teams will integrate AI/ML to automate repetitive and difficult tasks. Ideally, this would ease the burden on developers by removing their cognitive load, decreasing the amount of context-switching they have to do, and enabling them to stay in the flow of development.\"\n\nAccording to our 2022 Global DevSecOps survey, 62% of respondents practice ModelOps, while 51% use AI/ML to check code.\n\n“Combining digital transformation with business analytics and AI - real transformations are possible,” says [Christina Hupy](https://gitlab.com/c_hupy), Sr. Manager, Community Programs. 
“As more of their data is input, businesses can draw actual insights and use AI to continuously improve their systems.”\n\nDeSanto agrees and predicts that [AI-assisted workflows will gain popularity](/blog/why-ai-in-devops-is-here-to-stay/) in application development. “AI/ML will further enable rapid development, security remediation, improved test automation, and better observability,” he says.\n\n[Taylor McCaslin](https://gitlab.com/tmccaslin), Group Manager of Product for Data Science, says that while AI/ML certainly isn’t new, making technologies such as open-ended AI accessible to consumers, set an expectation to figure out how it could be better used in software development (think code completion and other such tasks).\n\nHe predicts that while AI/ML will be used all along the SDLC, organizations will grapple with privacy concerns, preserving intellectual property (such as AI-generated code ownership) and permissiveness of licenses for training data sets and algorithms.\n\nAt the same time, he says to look for “more rapid development in the MLOps and DataOps spaces to help developers manage, maintain, and iterate on production software systems that leverage ML and AI.” (Note: GitLab is investing in our ModelOps stage to help support the development of data science-enriched software within the GitLab platform.)\n\n## Prediction 4: Value stream analytics will take on a greater role in organizations\n\nThe digital transformation that organizations will undergo this year will require a deeper commitment to [examining value streams](/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers/). 
“Value stream analytics will extend past development workflows to provide a more holistic view of the value organizations deliver to their users (both internal and external),” DeSanto says.\n\nExecutive leadership will seek out metrics that give insight into how digital transformation and technological investments are delivering value and driving business results. This is a shift from solely focusing on development efficiencies. The 2022 Global DevSecOps survey found that 75% of respondents are either using a DevOps platform or plan to move to one within a year with one of the drivers of this change being metrics and observability.\n\n## Prediction 5: Observability will shift left for efficient DevSecOps \n\n[Observability](/direction/monitor/platform-insights/) will also move further left in the SDLC, according to [Michael Friedrich](https://gitlab.com/dnsmichi), Senior Developer Evangelist. “Observability-driven development will enable everyone to become more efficient and inspire innovation,\" he says.\n\nNew observability-enabling technologies like [eBPF](https://ebpf.io/what-is-ebpf) will help developers with automated code instrumentation instead of adding more workload with manual code instrumentation. eBPF also supports better observability and security workflows in cloud-native environments.\n\nObservability will play a critical role in improving the efficiency of DevSecOps workflows, including CI/CD, infrastructure cost analysis, and trending/forecasting for better capacity planning.\n\n_What do you think will be the big DevSecOps technology advancements this year? Let us know your predictions in the comments below._\n\n## Engage with DevSecOps experts\n\nWant to dig deeper into how to innovate while still keeping an eye on cost efficiencies? 
Sign up for our webcast [“GitLab’s DevSecOps Innovations and Predictions for 2023”](https://page.gitlab.com/webcast-gitlab-devsecops-innovations-predictions-2023.html?utm_medium=blog&utm_source=gitlab&utm_campaign=devopsgtm&utm_content=fy23q4release) on Jan. 31 to get expert advice and insights about this era of DevSecOps transformation and the tools and strategies you’ll need to meet this challenge. \n[Register](https://page.gitlab.com/webcast-gitlab-devsecops-innovations-predictions-2023.html?utm_medium=blog&utm_source=gitlab&utm_campaign=devopsgtm&utm_content=fy23q4release) today!\n\nCover image by [Drew Beamer](https://unsplash.com/@dbeamer_jpg?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com/)\n{: .note}\n",[9,875,1181,916],{"slug":8799,"featured":6,"template":686},"whats-next-for-devsecops","content:en-us:blog:whats-next-for-devsecops.yml","Whats Next For Devsecops","en-us/blog/whats-next-for-devsecops.yml","en-us/blog/whats-next-for-devsecops",{"_path":8805,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8806,"content":8812,"config":8818,"_id":8820,"_type":14,"title":8821,"_source":16,"_file":8822,"_stem":8823,"_extension":19},"/en-us/blog/whats-next-for-gitlab-ci",{"title":8807,"description":8808,"ogTitle":8807,"ogDescription":8808,"noIndex":6,"ogImage":8809,"ogUrl":8810,"ogSiteName":670,"ogType":671,"canonicalUrls":8810,"schema":8811},"From 2/3 of Git market to next-Gen CI system & auto DevOps","GitLab first became the standard for self hosting git with two-thirds of the market, then became the next generation CI system, and the next step is creating Auto DevOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679783/Blog/Hero%20Images/whats-next-for-gitlab-ci.jpg","https://about.gitlab.com/blog/whats-next-for-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From 2/3 of the 
self-managed Git market, to the next-generation CI system, to Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2017-06-29\",\n      }",{"title":8813,"description":8808,"authors":8814,"heroImage":8809,"date":8815,"body":8816,"category":679,"tags":8817},"From 2/3 of the self-managed Git market, to the next-generation CI system, to Auto DevOps",[1609],"2017-06-29","\n\nGitLab has transformed from offering just version control to becoming the first integrated product for DevOps. With GitLab you can go all the way from chatting about an idea to measuring it in production without spending time on configuring a bunch of tools. The version control part of GitLab is now used by 2/3 of the market that self host Git. The continuous integration (CI) part of GitLab is now the most popular next generation CI system. Today we introduce the future direction of GitLab: Auto DevOps.\n\n\u003C!-- more -->\n\nWhen we [announced our master plan in September of 2016](/blog/gitlab-master-plan/), we gave our vision for a tool that changes the way developers create software. Before the end of 2016 we [completed the master plan](/releases/2016/12/22/gitlab-8-15-released/) and introduced Auto Deploy. Auto Deploy evolved and sparked a vision for a more integrated DevOps experience. Today we have a video to present that vision of Auto DevOps.\n\n## GitLab has 2/3 market share in the self-managed Git market\n\nWith more than 100,000 organizations self-hosting GitLab, we have the largest share of companies who choose to host their own code. We’re estimated to have two-thirds of the single tenant market. 
When [Bitrise surveyed](http://blog.bitrise.io/2017/01/27/state-of-app-development-in-2016.html#self-hosted) ten thousand developers who build apps regularly on their platform, they found that 67 percent of self-managed apps prefer GitLab’s on-premise solution.\n\n![Image via Bitrise blog](https://about.gitlab.com/images/blogimages/bitrise-self-hosted-chart.png){: .shadow}\u003Cbr>\n\nSimilarly, in their survey of roughly one thousand development teams, [BuddyBuild found](https://www.buddybuild.com/blog/source-code-hosting#selfhosted) that 79% of mobile developers who host their own code have chosen GitLab:\n\n![Image via buddybuild blog](https://about.gitlab.com/images/blogimages/buddybuild-self-hosted-chart.png){: .shadow}\u003Cbr>\n\nIn their articles, both Bitrise and BuddyBuild note that few organizations use self-managed instances. We think there is a selection effect since both of them are SaaS-only offerings. Based on our experience, in large organizations (over 750 people), it is still more common to self host your Git server (frequently on a cloud service like AWS or GCP) than to use a SaaS service.\n\n## GitLab CI is the most popular next-generation CI system\n\nOur commitment to seamless integration extends to CI. Integrated [CI/CD](/topics/ci-cd/) is both more time and resource efficient than a set of distinct tools, and allows developers greater control over their build pipeline, so they can spot issues early and address them at a relatively low cost. Tighter integration between different stages of the development process makes it easier to cross-reference code, tests, and deployments while discussing them, allowing you to see the full context and iterate much more rapidly. 
We've heard from customers like [Ticketmaster](/blog/continuous-integration-ticketmaster/) that adopting GitLab CI can transform the entire software development lifecycle (SDLC), in their case helping the Ticketmaster mobile development team deliver on the longstanding goal of weekly releases. As more and more companies look to embrace CI as part of their development methodology, having CI fully integrated into their overall SDLC solution will ensure these companies are able to realize the full potential of CI. You can read more about the benefits of integrated CI in our white paper, [Scaling Continuous Integration](http://get.gitlab.com/scaled-ci-cd/).\n\nIn his post on [building Heroku CI](https://blog.heroku.com/building-tools-for-developers-heroku-ci), Heroku’s Ike DeLorenzo noted that GitLab CI is “clearly the biggest mover in activity on Stack Overflow,” with more popularity than both Travis CI and CircleCI:\n\n![Image via Heroku blog](https://about.gitlab.com/images/blogimages/heroku-questions-chart.png){: .shadow}\u003Cbr>\n\nWhile the use of Jenkins for CI is still higher than any other solution, we see more and more organizations moving from Jenkins, because upgrading their Jenkins server is a brittle process. The last two big things that GitLab CI lacked were scheduled builds (contributed to [GitLab 9.2](/releases/2017/05/22/gitlab-9-2-released/)) and cross-project builds (released in [GitLab 9.3 on June 22](/releases/2017/06/22/gitlab-9-3-released/)).\n\n## Auto DevOps is next\n\nWe want to [deliver more of idea to production](https://gitlab.com/gitlab-org/gitlab-ce/issues/32639) and continue to make the flow even better. [Our direction](/direction/#ci--cd) is to fully automate DevOps with the concept of [Auto DevOps](https://gitlab.com/gitlab-org/gitlab-ee/issues/2517). In a cloud-native world, developers have many projects, and it doesn't make sense to have to set up their tools for every one of them. 
With help from the wider community we'll ensure that everything works out of the box, from code quality metrics to Review Apps, and from metrics to autoscaling.\n\nWatch our Head of Product Mark Pundsack demonstrate our Auto DevOps vision, including Auto Create, Auto Build, Auto CI, Auto Deploy, Auto Code Quality, and Auto Review Apps:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/KGrJguM361c?rel=0\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n\nWe couldn't have built GitLab into the tool and company it is today without the contributions of the wider community, and the feedback from our customers. We're excited to see what you build with GitLab.\n\nHave thoughts about Auto DevOps? Comment on this blog post or on [the issue for Auto DevOps](https://gitlab.com/gitlab-org/gitlab-ee/issues/2517). Interested in what your team can do with GitLab Enterprise Edition? [Sign up for a free trial](/free-trial/) and let us know what you think.\n",[1789,9,726,109],{"slug":8819,"featured":6,"template":686},"whats-next-for-gitlab-ci","content:en-us:blog:whats-next-for-gitlab-ci.yml","Whats Next For Gitlab Ci","en-us/blog/whats-next-for-gitlab-ci.yml","en-us/blog/whats-next-for-gitlab-ci",{"_path":8825,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8826,"content":8832,"config":8838,"_id":8840,"_type":14,"title":8841,"_source":16,"_file":8842,"_stem":8843,"_extension":19},"/en-us/blog/whats-wrong-with-devops",{"title":8827,"description":8828,"ogTitle":8827,"ogDescription":8828,"noIndex":6,"ogImage":8829,"ogUrl":8830,"ogSiteName":670,"ogType":671,"canonicalUrls":8830,"schema":8831},"3 things that are wrong with DevOps today","Why are collaboration woes, shift-left waste, and tooling admin costs still plaguing DevOps?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680211/Blog/Hero%20Images/what-is-wrong-with-devops.jpg","https://about.gitlab.com/blog/whats-wrong-with-devops","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 things that are wrong with DevOps today\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joel Krooswyk\"}],\n        \"datePublished\": \"2018-02-20\",\n      }",{"title":8827,"description":8828,"authors":8833,"heroImage":8829,"date":8835,"body":8836,"category":679,"tags":8837},[8834],"Joel Krooswyk","2018-02-20","\n\nI’m continually impressed by the benefits achieved by modern ways of working. Lean processes, [Conversational Development](http://conversationaldevelopment.com/), and automation have helped us ship more value, faster. Those achievements have led customers to expect a lot more from their service providers. DevOps has been critical to those gains, but we’ve got more work to do – DevOps still has its problems.\n\n\u003C!-- more -->\n\nI have the privilege of talking with GitLab users every day. We celebrate impressive technical achievements, work through complex problems with CI/CD, or discuss new needs for their organization. The needs and problems seem to align themselves to one of three different areas:\n\n## 1. The wall still stands\n\nDev and Ops are still at war in some environments. In just the past couple of weeks I’ve heard the lack of collaboration between these groups called “the wall,” a “chasm,” and a “joke” by people in both areas! We’re simply not communicating well enough yet. We’re disappointed that after this much investment, there’s still so much room for improvement. Development and Operations continue to use different tools and to follow different rules.\n\n>It's like we're really doing DevSecBizPerfOps\n\nBut it doesn't end there. Now we've got more people in the mix analyzing concerns like security, performance, and business metrics. It's like we're really doing DevSecBizPerfOps or some such thing, and so our flow continues to be interrupted. Silos continue to exist, if not multiply. 
It also feels like Ops hasn’t gotten enough love, which is why GitLab is working toward better Operations views as part of our [product vision](/blog/devops-strategy/) for 2018.\n\n## 2. Administration costs are still too high\n\nAs we continue to [shift left with build, test, and security](/solutions/security-compliance/), admin costs continue to rise. Developers are often being empowered at the cost of their own productivity. Administration efforts can actually consume [half a developer’s time](https://www.infoworld.com/article/2613762/application-development/software-engineers-spend-lots-of-time-not-building-software.html) each week! Unfortunately, this is a growing form of waste. A core DevOps goal is to reduce administration time, but the admin costs of DevOps tools can be some of the highest in the software development lifecycle ecosystem due to extensive plug-in architectures, support of quickly evolving environments, and asynchronous vendor update woes. We continually increase complexity and add requirements to existing stacks without looking for more modern solutions. Despite all the loss of time, I still hear commonly that there's no way to visualize the flow of the code from requirement to production, especially once code is committed to a repository.\n\nThe good news is that more of us are taking the time to re-examine our ecosystems because they've become bloated with a wide variety of tools from a wide variety of vendors for very specific purposes. I wouldn't consider the current trend to be a tooling consolidation so much as a streamlining or simplification of toolsets. Questions I hear most often tend to focus on optimizing our efficiency and reliability while minimizing administration of laborious plug-in and trigger-driven architectures. We're trending in the right direction.\n\n## 3. We're holding onto the past\n\nWe’ve spent and continue to spend billions on software tools annually. Tooling can be extremely costly! 
Sometimes we’ve invested so much money in old tooling that we simply can’t let it go. Too often we hold onto tools and processes just because we spent a lot of time and money on them while newer, time-saving products are available for less than the cost of the renewal of the old beasts. And so we hold onto the past as we try to implement new technologies. It’s no surprise that shoving new technology into old tools can generate enormous friction and unique problems.\n\n>It’s no surprise that shoving new technology into old tools can generate enormous friction and unique problems.\n\nPerhaps we bought best-in-breed tools. Those products commonly require excessive coding efforts to integrate and maintain because \"best in breed\" typically means we bought from a number of vendors. Interconnectivity of those tools typically doesn’t come out of the box. And of course, once the API is mentioned as a solution, the admin and maintenance burden increases once again. We spend a lot of money on specific solutions but inevitably end up with holes in our end-to-end process, too often as it relates to security or performance.\n\nBut this way of looking at tooling is beginning to change! I'm hearing more frequently that dramatic price increases, as well as the outsourcing of product maintenance and support, are triggering enterprises to reconsider the past. When we've invested all that time and money into a product, but that product then gets sold to three different parent companies within a decade, our ROI calculations lose their luster. Outsourcings and vendor-level product sales are being viewed as indicators of a potentially declining market. 
Enterprises are using that as a trigger to seek out updated tools for the years ahead, reducing cost and enabling modern workflows.\n\n## It all impacts delivery efficiency\n\nNo matter whether we’re talking about disappointment in collaboration, shift-left waste, or tooling admin costs, it comes down to this: it all negatively impacts our ability to deliver securely with speed and efficiency. If we truly want to meet and exceed the expectations of our customers, we’ll need to continually hone and improve our DevOps processes and tools to reflect modern ways of working.\n\n[Photo](https://unsplash.com/photos/suaBxarUnyo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Caleb George on [Unsplash](https://unsplash.com/search/photos/wall?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,683,749],{"slug":8839,"featured":6,"template":686},"whats-wrong-with-devops","content:en-us:blog:whats-wrong-with-devops.yml","Whats Wrong With Devops","en-us/blog/whats-wrong-with-devops.yml","en-us/blog/whats-wrong-with-devops",{"_path":8845,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8846,"content":8852,"config":8856,"_id":8858,"_type":14,"title":8859,"_source":16,"_file":8860,"_stem":8861,"_extension":19},"/en-us/blog/where-to-donate-your-devops-skills",{"title":8847,"description":8848,"ogTitle":8847,"ogDescription":8848,"noIndex":6,"ogImage":8849,"ogUrl":8850,"ogSiteName":670,"ogType":671,"canonicalUrls":8850,"schema":8851},"Where to donate your DevOps skills","Want to feel great and help fill the DevOps talent pipeline? 
Here are some rewarding opportunities to donate your tech knowledge to others.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683269/Blog/Hero%20Images/clark-tibbs-oqstl2l5oxi-unsplash.jpg","https://about.gitlab.com/blog/where-to-donate-your-devops-skills","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Where to donate your DevOps skills\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2021-12-08\",\n      }",{"title":8847,"description":8848,"authors":8853,"heroImage":8849,"date":2493,"body":8854,"category":813,"tags":8855},[1454],"\n\nYour technical knowledge could be a gift to someone else. Nonprofits around the world are seeking talented professionals to mentor, volunteer their technical skills, or teach courses in-person and online. The donations of your time and expertise could change the lives of people in your community or halfway around the world. The more access underrepresented groups, including women and minorities, have to skills development and mentoring that lead to higher-paying jobs, the better their chances of rising up out of difficult socio-economic conditions. And don’t forget, volunteering is incredibly rewarding.\n\nHere are six organizations and tech communities that could benefit from your skills and experience: \n\n## [Outreachy](https://www.outreachy.org/)\n\nOutreachy provides internships in open source to people subject to systemic bias and impacted by underrepresentation in the technical industry where they are living. Outreachy interns work with experienced mentors from open source communities. 
Internship projects may include programming, user experience, documentation, graphical design, data science, marketing, user advocacy, or community event planning.\n\nGitLab has participated in the Outreachy internship program, which intersects with our [Diversity, Inclusion, and Belonging](https://handbook.gitlab.com/handbook/values/#diversity-inclusion) value. Our team, including Senior Backend Engineer [Christian Couder](https://gitlab.com/chriscool), wrote [about their experience](https://about.gitlab.com/blog/outreachy-sponsorship-winter-2020/): “One of the benefits of the Outreachy technology internship is that the interns do not need to be students. It's a great opportunity for people who are coming back into the workforce after a hiatus, or who are navigating a career change into tech. This technology internship program is unique because it incorporates skill sets beyond engineering – which creates a broader range of skill sets represented in the open source world. The Outreachy internship is remote, making it more relevant than ever during the pandemic by helping interns gain experience working on an all-remote team.”\n\n## [CodeYourFuture](https://codeyourfuture.io/)\n\nCodeYourFuture is a U.K.-based nonprofit that trains “some of the most deprived members of society” to become web developers and helps them to find work in the tech industry. Students are trained in full-stack web development by volunteers from the tech industry, putting a strong emphasis on collaboration and product development through tech projects.\n\nSenior Frontend Engineer [Coung Ngo](https://gitlab.com/cngo) contributes his time to the nonprofit and says that while DevOps is not in the syllabus, the underlying skill sets are. “They teach a full-stack course of HTML, CSS, JS, React, Node, and SQL/MongoDB,” Ngo says. 
“It's a nice community, so if someone lives in London, Glasgow, Manchester, or Birmingham, it's enjoyable to join in with the in-person classes.”\n\n## [WeThinkCode](https://www.wethinkcode.co.za/about)\n\nWeThinkCode is a nonprofit aimed at closing the skills gap in the digital sector in South Africa and preparing young people to participate in the region’s economy. The organization believes that South Africa’s youth represent a pool of talent that mostly remains untapped and wants to provide businesses with access to this source of tech talent. Nonprofits like WeThinkCode are important because of the way they mobilize previously underestimated groups by providing an avenue for education where traditional paths are often more closed off. \n\nWeThinkCode was a GitLab donation recipient and the organization utilizes GitLab’s free SaaS version in their curriculum. The organization has [four ways for professionals to volunteer](https://www.wethinkcode.co.za/volunteer), including virtual opportunities:\n\n- Mentorship - Experienced software development practitioners provide guidance to a group of four to six students on communication, ways of working, insights on tackling programming challenges in the curriculum, and tips on the conduct expected in the workplace. 
\n \n- Interview readiness - Practitioners with experience in hiring and recruiting will help conduct mock interviews and then provide constructive feedback.\n\n- WomenThinkCode Meetups - Women in tech to act as role models and deliver talks about their career journeys covering tech and interpersonal aspects.\n\n- Community-hosted talks - Practitioners deliver talks on the real-world application of various technologies.\n\n## [KodewithKlossy](https://www.kodewithklossy.com/)\n\nKodewithKlossy is a nonprofit with the mission to create learning experiences and opportunities for young women and nonbinary individuals that increase their confidence and inspire them to pursue their passions in a technology-driven world. KodewithKlossy found that prior to camp, only two out of 10 attendees (also called scholars) had computer science experience and after, as a result of their camp experience, nine in 10 say they plan to pursue education and opportunities in computer science. Volunteers can serve as role models in the camp speaker series or participate in other important ways.\n\n## [Google Summer of Code](https://summerofcode.withgoogle.com/archive/)\n\nGoogle Summer of Code (GSoC) is a global program focused on bringing more student developers into open source software development. Students work on a three-month programming project with an open source organization during their break from university. \n\nIn 2022, Google will [expand its GSOC enrollment](https://opensource.googleblog.com/2021/11/expanding-google-summer-of-code-in-2022.html) beyond students to include all newcomers to open source who are 18 years and older. Google states, “We realize there are many folks that could benefit from the GSoC program that are at various stages of their career, recent career changers, self-taught, those returning to the workforce, etc. 
so we wanted to allow these folks the opportunity to participate in GSoC.”\n\nGitLab [participated this year](https://summerofcode.withgoogle.com/archive/2021/organizations/5396515480141824/), helping to mentor students, and Couder has been a mentor since 2008.\n\n## Open source communities\n\nOpen source communities like Cloud Native Computing Foundation [(CNCF)](https://www.cncf.io/), which include students and people who are changing careers, are a fantastic outlet to share your DevOps expertise. You can help other community members improve their features or applications, learn about documentation, learn new languages, and uncover bugs. Senior Developer Evangelist [Michael Friedrich](https://gitlab.com/dnsmichi) says it is rewarding to become a mentor in open source communities. “It is important to be honest, but also to be patient and kind. Don’t say something is easy – it’s not easy for that person. Instead, make sure to share your expertise in a constructive and helpful way,” he adds. 
Listen to more of Friedrich’s [advice for open source contributions](https://www.youtube.com/watch?v=yT63olXdS-I).\n\n_Cover image by Clark Tibbs via [Unsplash](https://unsplash.com/)._\n",[9,682,267],{"slug":8857,"featured":6,"template":686},"where-to-donate-your-devops-skills","content:en-us:blog:where-to-donate-your-devops-skills.yml","Where To Donate Your Devops Skills","en-us/blog/where-to-donate-your-devops-skills.yml","en-us/blog/where-to-donate-your-devops-skills",{"_path":8863,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8864,"content":8869,"config":8874,"_id":8876,"_type":14,"title":8877,"_source":16,"_file":8878,"_stem":8879,"_extension":19},"/en-us/blog/why-ai-in-devops-is-here-to-stay",{"title":8865,"description":8866,"ogTitle":8865,"ogDescription":8866,"noIndex":6,"ogImage":3480,"ogUrl":8867,"ogSiteName":670,"ogType":671,"canonicalUrls":8867,"schema":8868},"Why AI in DevOps is here to stay","Two years ago artificial intelligence wasn't part of mainstream software development. Now AI in DevOps is seemingly everywhere. Here's why.","https://about.gitlab.com/blog/why-ai-in-devops-is-here-to-stay","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why AI in DevOps is here to stay\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-09-15\",\n      }",{"title":8865,"description":8866,"authors":8870,"heroImage":3480,"date":8871,"body":8872,"category":769,"tags":8873},[851],"2022-09-15","\nIn 2020, respondents to our annual Global DevSecOps Survey started mentioning artificial intelligence and machine learning for the first time. 
In that survey, roughly 16% of respondents were using “bots” to test code, or were planning to, while 12% of devs said knowledge of AI/ML would be critical to their future.\n\nFast forward just two years and [AI in DevOps](/topics/devops/the-role-of-ai-in-devops/) is a reality in teams around the world, according to our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/). \n\n- 24% of respondents said their DevOps practices include AI/ML, more than double the 2021 percentage.\n\n- 31% of teams are using AI/ML for code review, 16 points higher than last year. \n\n- Today 37% of teams use AI/ML in software testing (up from 25% in 2021), and 20% plan to introduce it this year. Another 19% plan to roll out AI/ML-powered testing in the next two to three years.\n\n- Fully 62% of survey takers are practicing ModelOps.\n\n- 51% use AI/ML to check (not test) code. \n\nAll told, only 5% of teams said they had _no plans_ to incorporate AI in DevOps.\n\nHere's a snapshot of where AI in DevOps is today and why, despite some challenges, AI will likely play an increasingly important role.\n\n## Why AI in DevOps\n\nIn many ways, [DevOps and AI/ML](/blog/ai-in-software-development/) are the perfect marriage: DevOps requires automation to reach maximum efficiency and AI/ML are obvious choices to tackle repetitive tasks. Imagine adding team members entirely focused on a single job, with incredible attention to detail and no need for vacations or even a coffee break – that’s an ML “bot” in a nutshell. \n\nWhen we asked DevOps teams what the most common reasons were for [software release delays](/blog/top-reasons-for-software-release-delays/), the answers called out steps that are critical but manual, tedious, time-consuming, and potentially rife with errors: [software testing](/blog/the-gitlab-guide-to-modern-software-testing/), code review, security testing and code development. 
For many teams, AI/ML could be key in streamlining these processes.\n\n## Smarter software testing\n\nNo DevOps process is perhaps in more need of streamlining than software testing, which is no doubt why teams have been adding AI/ML into the mix for several years now. Testing is that process [everyone loves to hate](/blog/the-software-testing-life-cycle-in-2021-a-more-upbeat-outlook/), but it is also the step that needs to happen more often in all the ways, or at least that’s what developers tell us year after year. But there are so many different kinds of tests, limited development time, and even more constrained QA teams. Machine learning bots can help bridge the manpower gap, freeing up resources to focus on tests best done by humans. \n\nAnd increased testing creates another issue – test data management – that could ideally be triaged and dealt with using AI.\n\n## The benefits of ModelOps\n\nAI/ML solutions have also made their way into other DevOps steps, specifically [ModelOps](/direction/modelops/). Not only is this an area GitLab is focusing on ([beginning with smarter code reviews](/blog/the-road-to-smarter-code-reviewer-recommendations/)), but more than half of DevOps teams report they’re exploring what’s involved in bringing data science and operations together. \n\n## Beware the learning curve\n\nArtificial intelligence and machine learning are not without their challenges, however. In our 2022 survey, developers expressed very real concerns about the steep learning curves involved in the technology adoption. “Technology is rapidly changing,” was a thought shared by many developers, alongside “implementing AI is an enormous challenge.” \n\nOne developer summed it up: “4G, 5G, AI, Metaverse, virtual space - developers have to support all of this.”\n\nBrendan O'Leary, [staff developer evangelist at GitLab](/company/team/#brendan), says AI naturally has a big learning curve because it requires experimentation. 
\"This is not just a programming language,\" he explains. \"We've got some data and a hypothesis around it and AI is what's going to help us prove it. This is a different kind of experiment than other kinds of coding... we've got to learn how to measure the impact, understand it, and iterate on it. It's a different kind of paradigm.\"\n",[9,681,231],{"slug":8875,"featured":6,"template":686},"why-ai-in-devops-is-here-to-stay","content:en-us:blog:why-ai-in-devops-is-here-to-stay.yml","Why Ai In Devops Is Here To Stay","en-us/blog/why-ai-in-devops-is-here-to-stay.yml","en-us/blog/why-ai-in-devops-is-here-to-stay",{"_path":8881,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8882,"content":8888,"config":8893,"_id":8895,"_type":14,"title":8896,"_source":16,"_file":8897,"_stem":8898,"_extension":19},"/en-us/blog/why-devops-and-zero-trust-go-together",{"title":8883,"description":8884,"ogTitle":8883,"ogDescription":8884,"noIndex":6,"ogImage":8885,"ogUrl":8886,"ogSiteName":670,"ogType":671,"canonicalUrls":8886,"schema":8887},"Why DevOps and zero trust go together","Learn how DevOps and zero trust have matured into a solid pairing and the security considerations that come into play.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683257/Blog/Hero%20Images/devopszerotrust.jpg","https://about.gitlab.com/blog/why-devops-and-zero-trust-go-together","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why DevOps and zero trust go together\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-08-17\",\n      }",{"title":8883,"description":8884,"authors":8889,"heroImage":8885,"date":8890,"body":8891,"category":875,"tags":8892},[1454],"2022-08-17","\n\nWhen the concept of zero trust was first [introduced in 2010 by Forrester Research](https://media.paloaltonetworks.com/documents/Forrester-No-More-Chewy-Centers.pdf), it 
seemed directly aimed at enterprise security professionals, who were struggling to keep the network perimeter safe from breaches and attacks. As enterprises and zero trust frameworks have evolved, DevOps has become the perfect home for these principles.\n\nZero trust requires all users – human and machine, internal or external – to be authenticated, authorized, and continuously validated to first access and continue to access resources. These requirements are fully aligned with modern application development and the advent of DevSecOps, where security continues to shift left in the development life cycle.\n\nIn 2019, GitLab Staff Security Engineer [Mark Loveless](/company/team/#mloveless) began to examine the [opportunities in marrying DevOps and zero trust](/blog/evolution-of-zero-trust/). Much has changed since then, including a greater acceptance, adoption, and, in some cases, requirement of zero trust frameworks. For instance, in its [executive order on cybersecurity](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/), the Biden administration referenced zero trust and the National Institute of Standards and Technology (NIST) called out zero trust architecture as an approach to its [Secure Software Development Framework](/blog/comply-with-nist-secure-supply-chain-framework-with-gitlab/) standard.\n\n## Addressing zero trust confusion\n\nAs zero trust strategies have become more popular, confusion in the market has increased. For instance, zero trust is not a single product or service – it is a strategy applied to a security framework.\n\n“Companies are marketing their zero trust solutions as _THE_ solution. They claim that zero trust solves everything wrong and you’ll be secure. 
No single solution out there addresses all of the authentication problems that organizations encounter,” Loveless says.\n\nAnother point of confusion, according to Loveless, is the fact that some early zero-trust backers have not evolved with zero trust itself. “The core beginnings of zero trust go back a couple of decades, originally centered around users and specific systems. There is an entire world of newer technology, including the cloud, automation, and AI, that has emerged since then that is out there and completely underrepresented in approaches to zero trust,” he says.\n\n## How zero trust fits into modern DevOps\n\nZero trust has three core components that must be fully understood to be able to map it to modern application development:\n\n- Data must be protected. Before the data can be accessed, the identity of who or what (in the case of automation) is accessing the data needs to be determined and a decision has to be made as to whether that access will be granted.\n\n- The identity must be extremely specific. The requestor must be proven, preferably by cryptographic means, to be who or what they say they are.\n\n- A secure channel for accessing the data must be able to be established. After authentication, data in transit should be protected by a secure channel and that data should only be revealed to the requestor.\n\nWhere zero trust strategies often go astray is assuming that the requestor is human. As automation becomes more prevalent in DevOps, DevSecOps must account for the likelihood that a requestor could be automated. 
But this inevitably raises questions, according to Loveless, such as:\n\n- Is the automated request coming from a trusted device?\n- Who initiated the action that led to the automated process requesting the data?\n- Was it an automated process that kicked off a secondary automated process that is now requesting the data?\n- Does the person that set up the automated processes still have access to these processes’ credentials?\n\nLoveless says organizations might need to rethink their authentication and authorization approaches to get the most out of the DevOps-zero trust pairing because automation requires a greater level of sophistication. “Mutual authentication strategies like managing your own certificate authority or setting up mutual TLS can be challenging,” Loveless says. Instead, organizations might consider [implementing automated multifactor authentication tools such as OpenID Connect](https://docs.gitlab.com/ee/integration/openid_connect_provider.html). “One solution might negate another solution, or solving for one cloud provider might exclude another, creating limits,” he says.  \n\n## How GitLab’s DevOps Platform supports zero trust\n\nGitLab’s cohesion with zero trust stems largely from its belief that it is not a single solution to zero trust, but instead part of an ecosystem in support of zero trust principles. \n\nOrganizations can utilize GitLab to enact its zero trust framework, including the ability to:\n- set and enforce granular role-based access for all users and machines\n- authenticate users and machines before allowing access\n- require continuous authentication and authorization\n- monitor the security status of users and machines and quickly respond to issues\n- classify data and set and enforce access levels accordingly\n- audit data access in real-time and generate compliance reports\n\n## Going forward\n\nGitLab’s commitment to zero trust is foundational and ongoing. 
As zero trust frameworks evolve and more standards bodies require adherence to zero trust principles, GitLab will continue to be a trusted partner in meeting these demands.\n\nCover image by Max Tcvetkov on [Unsplash](https://unsplash.com/@your_scorpion?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n",[9,875,916],{"slug":8894,"featured":6,"template":686},"why-devops-and-zero-trust-go-together","content:en-us:blog:why-devops-and-zero-trust-go-together.yml","Why Devops And Zero Trust Go Together","en-us/blog/why-devops-and-zero-trust-go-together.yml","en-us/blog/why-devops-and-zero-trust-go-together",{"_path":8900,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8901,"content":8906,"config":8910,"_id":8912,"_type":14,"title":8913,"_source":16,"_file":8914,"_stem":8915,"_extension":19},"/en-us/blog/why-devops-collaboration-continues-to-be-important",{"title":8902,"description":8903,"ogTitle":8902,"ogDescription":8903,"noIndex":6,"ogImage":928,"ogUrl":8904,"ogSiteName":670,"ogType":671,"canonicalUrls":8904,"schema":8905},"Why DevOps collaboration continues to be important","Modern DevOps isn't just about tech adoption and new processes. DevOps collaboration is going to play a key role. Here's why.","https://about.gitlab.com/blog/why-devops-collaboration-continues-to-be-important","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why DevOps collaboration continues to be important\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-10-25\",\n      }",{"title":8902,"description":8903,"authors":8907,"heroImage":928,"date":3544,"body":8908,"category":769,"tags":8909},[851],"\nIt’s tempting to think the concept of DevOps collaboration is something no one needs to talk about anymore. 
After all, the methodology has been around for nearly 15 years, is in widespread use, and has clearly proven to be successful at getting safer software out the door faster. Haven’t we figured out DevOps collaboration by now?\n\nThe answer is no, at least according to our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) and to industry experts looking at the future of DevOps.\n\nFor starters, dev and ops respondents to our survey told us programming languages and soft skills like collaboration are going to be most important for their careers going forward. DevOps collaboration was the second most important skill for sec pros surveyed. These results were far from a one-off: In our [2020 survey](/images/developer-survey/gitlab-devsecops-2021-survey-results.pdf), dev, sec, and ops were unanimous that “soft skills,” including DevOps collaboration, were the most critical for future careers. In [2021](/images/developer-survey/gitlab-devsecops-2021-survey-results.pdf), sec and ops continued to prioritize DevOps collaboration for the future, while devs opted for AI/ML. \n\nThis year, we asked over 5,000 survey takers what would be most important to their careers, but we didn’t ask *why* it would be so important. A look at some recent thought leadership around DevOps collaboration sheds some light.\n\nAccording to [an article in SDX Central](https://www.sdxcentral.com/articles/analysis/devops-its-about-the-people/2022/07/), pundits think collaboration is “critical for DevOps success” today and in the future. An [article in Tech Beacon](https://techbeacon.com/app-dev-testing/future-devops) goes further, suggesting DevOps will embrace business metrics as a measure of success going forward, and, as such, will require levels of cross-functional collaboration not seen before. 
\n\nIn other words, as DevOps expands beyond a technology goal (develop software) to a business goal (ensure customer satisfaction or business profitability), more teams will be seated at the table. The more people involved, the more DevOps collaboration will be critical to the future.\n\nWe’d like to know how DevOps collaboration works on _your_ team. Our 12-question survey will take you less than four minutes! [Take the survey!](/blog/take-our-survey-on-collaborative-software-development/)\n",[9,681,749],{"slug":8911,"featured":6,"template":686},"why-devops-collaboration-continues-to-be-important","content:en-us:blog:why-devops-collaboration-continues-to-be-important.yml","Why Devops Collaboration Continues To Be Important","en-us/blog/why-devops-collaboration-continues-to-be-important.yml","en-us/blog/why-devops-collaboration-continues-to-be-important",{"_path":8917,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8918,"content":8923,"config":8928,"_id":8930,"_type":14,"title":8931,"_source":16,"_file":8932,"_stem":8933,"_extension":19},"/en-us/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab",{"title":8919,"description":8920,"ogTitle":8919,"ogDescription":8920,"noIndex":6,"ogImage":7483,"ogUrl":8921,"ogSiteName":670,"ogType":671,"canonicalUrls":8921,"schema":8922},"GitOps with GitLab: What you need to know about the Flux CD integration","Inside the decision to integrate Flux CD with the GitLab agent for Kubernetes and what it means to you.","https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: What you need to know about the Flux CD integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2023-02-08\",\n      
}",{"title":8919,"description":8920,"authors":8924,"heroImage":7483,"date":8925,"body":8926,"category":791,"tags":8927},[1356],"2023-02-08","\n\nIn January, [we decided to integrate Flux CD with the GitLab agent for Kubernetes](https://gitlab.com/gitlab-org/gitlab/-/issues/357947). [Flux CD](https://fluxcd.io/) is a mature GitOps solution and one of the market leaders in the area. We have since decided to make Flux CD our recommended approach to do GitOps with GitLab – previously, the agent for Kubernetes alone was the recommended approach. Let's discuss what this change means for current users and what our plans are for the integration.\n\nFirst of all, let's remove the most worrying thought from the agenda: We are not deprecating any agent for Kubernetes functionality at this point. The GitOps offering remains fully supported and transitions to maintenance mode. We plan to deprecate it with at least one year of removal time once we consider the Flux integration solid. As a result, the removal is unlikely before the GitLab 17.0 release, which is expected in 2024. We are looking into providing tooling to facilitate (or automate) the migration once the time comes. If you use the agent for Kubernetes for GitOps, you don't have to do anything at this time.\n\nThis change does not affect the agent's other non-GitOps functionality either. The [CI/CD pipeline integration](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html) and [operational container scanning](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html) remain intact, and we will continue investing in them.\n\n## What to expect from this change\n\nFrom now on, instead of building our solution for GitOps, we will focus on supporting Flux and improving its user experience when it is used together with GitLab. Flux CD will become the recommended tool to do GitOps with GitLab. 
Initially, we will provide documentation on the Flux setup we recommend for our users while we focus on building out various integrations.\n\nIn terms of the integrations, we are looking at providing a UI built into GitLab. You might also be able to use the UI with other tools, including the CI pipeline integration of the agent, but it will work best with deployments managed by Flux. Besides the UI integration, we want to streamline Flux's access management. Flux accesses GitLab through the regular GitLab front door. As a result, it needs to authenticate with a token, requests might be rate-limited, and, in general, it does not seem to be the most efficient way to do its job. We plan to simplify this for our users to avoid the necessity of managing dozens of deploy keys and to decrease the load on GitLab at the same time.\n\n## Why Flux?\n\nWhy did we choose Flux CD instead of something else? We evaluated several options. There are other open-source GitOps tools. The biggest contender was [ArgoCD](https://argoproj.github.io/cd), another mature Cloud Native Computing Foundation project in the GitOps space. ArgoCD is a full-featured product for GitOps, while Flux is a GitOps toolkit. While we like and value ArgoCD a lot, we think it does not lend itself to integration with GitLab.\n\nAs we are already in the process of building out UI integrations with the cluster, we know how the GitLab UI will be able to reach the Kubernetes API. Flux relies on the standard Kubernetes API 100%, so we can easily integrate it into our UI access approach. Relying only on the Kubernetes API is a significant benefit over ArgoCD, which provides a custom API.\n\nBesides going with another tool, we evaluated the work needed to build a competitive, in-house solution. We found in-house development is the strongest contender to Flux CD, and while it was very compelling, we decided to go with the integration instead. 
We believe this should give our customers more value faster than a custom solution. Moreover, it should enable existing Flux users to benefit from our integrations with minor modifications in their usage patterns as we roll out the integrations.\n\n## What comes next?\n\nFirst, we want to [document our recommendations for using FluxCD with GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/389382). At the same time, we will change our GitOps documentation to recommend Flux instead of the legacy GitOps solution. We consider these the most important steps to minimize uncertainty and set you up for a successful start.\n\nTogether with the above, the team is working hard on shipping the first version of an [integrated Kubernetes UI](https://gitlab.com/gitlab-org/gitlab/-/issues/375449). We are starting with an environment overview and build an [entire Kubernetes dashboard](https://gitlab.com/groups/gitlab-org/-/epics/2493) as part of GitLab. The cluster UI integration will enable GitLab users to learn more about their cluster state without leaving the GitLab UI and should allow a nearly real-time view of GitOps deployments using Flux CD.\n\nWe have clear ideas on how to do what I described above. We are still researching and learning about many other topics, including [how to simplify Flux best accessing GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/389393). If you have experience using Flux with GitLab and have any feedback, recommendations, or requests on what the integration should support, we would like to hear from you. Please, reach out to me using [my GitLab profile](https://gitlab.com/nagyv-gitlab).\n\n## The Flux community\n\nBefore I close this article, I would like to say hi and thank you to the Flux community. We already got invited to the Flux development meeting, and the core team was very welcoming. 
As we always actively contributed to the core tools – first [`gitops-engine`](https://github.com/argoproj/gitops-engine/), later [`cli-utils`](https://github.com/kubernetes-sigs/cli-utils/) – supporting our GitOps offering, we are looking forward to contributing to Flux CD.\n\nWe are looking forward to working more closely with you. Thank you for building this great tool and community!\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n\nRead more:\n\n- More about the [Flux CD integration decision](https://gitlab.com/gitlab-org/gitlab/-/issues/357947) \n- Docs for [agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html) \n- Issue on [our current focus](https://gitlab.com/gitlab-org/gitlab/-/issues/389382) \n- Preparation issues: [Flux to GitLab access management](https://gitlab.com/gitlab-org/gitlab/-/issues/389393) and [Visualizing Kubernetes resources within the Environments page](https://gitlab.com/gitlab-org/gitlab/-/issues/375449)\n\n",[534,1477,9,726],{"slug":8929,"featured":6,"template":686},"why-did-we-choose-to-integrate-fluxcd-with-gitlab","content:en-us:blog:why-did-we-choose-to-integrate-fluxcd-with-gitlab.yml","Why Did We Choose To Integrate Fluxcd With 
Gitlab","en-us/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab.yml","en-us/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab",{"_path":8935,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8936,"content":8941,"config":8946,"_id":8948,"_type":14,"title":8949,"_source":16,"_file":8950,"_stem":8951,"_extension":19},"/en-us/blog/why-gitlab-ci-cd",{"title":8937,"description":8938,"ogTitle":8937,"ogDescription":8938,"noIndex":6,"ogImage":783,"ogUrl":8939,"ogSiteName":670,"ogType":671,"canonicalUrls":8939,"schema":8940},"Why GitLab CI/CD?","With GitLab’s out-of-the-box CI/CD, you can spend less time maintaining and more time creating.","https://about.gitlab.com/blog/why-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitLab CI/CD?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-04-02\",\n      }",{"title":8937,"description":8938,"authors":8942,"heroImage":783,"date":8943,"body":8944,"category":299,"tags":8945},[788],"2019-04-02","\nDevOps speed is a competitive advantage for businesses. According to DORA, [companies that deploy more frequently perform better in the market](https://cloudplatformonline.com/2018-state-of-devops.html). 
Everyone wants to be able to do their jobs better and deploy more frequently, but as the organization grows, speed bumps keep getting in the way:\n\n*   **Too many integration points** – Connecting [CI/CD](/topics/ci-cd/) to all of the different tools in a [DevOps toolchain](/topics/devops/) is confusing and keeps adding more steps and more points of failure to the process.\n*   **Brittle tools** – We're spending more time maintaining and updating these tools than actually creating new features.\n*   **Slow modernization** – We want to leverage [microservices](/topics/microservices/) and [cloud native](/topics/cloud-native/) development, but we spend too much time putting out fires.\n\nWith these speed bumps come complicated workflows, lack of pipeline visibility, and confusion about processes. With the Total Cost of Ownership (TCO) going up as more resources go to maintenance, teams can't afford to innovate. As organizations scale, these complexities only get worse.\n\nThat sounds exhausting, doesn't it?\n\n## Current CI/CD tools\n\nAt GitLab, we love transparency so much we made it [one of our core values](https://handbook.gitlab.com/handbook/values/#transparency). It's also why [we list all other DevOps tools on our website](/competition/) (no, really). We think open and direct communication is the fastest way to get the feedback you need to make the right decisions. For DevOps teams, the right tools should make things easier but we've found that _more_ doesn't always mean _better_.\n\n### High maintenance\n\nIntegrating CI/CD tools with the rest of your toolchain can get complicated – managing and updating these tools regularly isn't any easier. Many teams rely on tool experts just to keep everything running smoothly.\n\n### Lack of cloud native compatibility\n\nAs more organizations look to leverage microservices and [cloud native](/topics/cloud-native/) development, they'll need CI/CD tools that support modern architecture. 
With some CI/CD platforms, teams still need additional plugins to connect to Kubernetes or a container registry. Teams using legacy CI/CD tools will need to upgrade in order to gain those cloud native capabilities.\n\n### Toolchain complexity\n\nToolchains sometimes have too much in common with [Rube Goldberg devices](https://www.youtube.com/watch?v=qybUFnY7Y8w). Adding on more applications, more platforms, and more handoffs increases complexity that slows down teams. Add to that the maintenance, plugin, and upgrade requirements to manage these separate tools, and productivity gets harder.\n\n## Why teams love GitLab CI/CD\n\nCI/CD tools should make engineers' lives easier by giving them greater visibility into their pipelines, without burdening them with complicated integrations and plugin maintenance. GitLab CI/CD is designed to be simple so teams can start using it right away.\n\n### Easy to use\n\nGitLab uses a YAML configuration that any developer can understand so you can build pipelines faster.\n\n### Cloud native CI/CD\n\nWith its built-in container registry and Kubernetes integration, GitLab supports cloud native development.\n\n### Simple architecture\n\nOne integrated application with one set of permissions.\n\n### Fast and efficient\n\nWith autoscaling runners, developers no longer have to wait on builds, and VMs spin up or down automatically to process queues at a lower cost.\n\n### Everything in one place\n\nGitLab CI/CD is already built into the same application that contains source code management, planning, monitoring, etc.\n\nAs a single application for the entire DevOps lifecycle, everything is in one conversation and visible across teams. With GitLab's out-of-the-box CI/CD, you can spend less time maintaining and more time creating. 
It's CI/CD that _just works_.\n\nWe invite you to explore GitLab CI/CD for yourself, and see why we were rated #1 in the Forrester CI Wave™.\n\n[Explore GitLab CI/CD](/solutions/continuous-integration/)\n{: .alert .alert-gitlab-purple.text-center}\n",[109,9,683],{"slug":8947,"featured":6,"template":686},"why-gitlab-ci-cd","content:en-us:blog:why-gitlab-ci-cd.yml","Why Gitlab Ci Cd","en-us/blog/why-gitlab-ci-cd.yml","en-us/blog/why-gitlab-ci-cd",{"_path":8953,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8954,"content":8959,"config":8964,"_id":8966,"_type":14,"title":8967,"_source":16,"_file":8968,"_stem":8969,"_extension":19},"/en-us/blog/why-gitlab-self-managed-is-the-perfect-partner-for-the-public-sector",{"title":8955,"description":8956,"ogTitle":8955,"ogDescription":8956,"noIndex":6,"ogImage":6915,"ogUrl":8957,"ogSiteName":670,"ogType":671,"canonicalUrls":8957,"schema":8958},"Why GitLab self-managed is the perfect partner for the public sector","Planning, source code management, CI/CD, app security, and compliance features make the DevSecOps platform a great pairing for government environments.","https://about.gitlab.com/blog/why-gitlab-self-managed-is-the-perfect-partner-for-the-public-sector","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitLab self-managed is the perfect partner for the public sector\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2023-12-13\",\n      }",{"title":8955,"description":8956,"authors":8960,"heroImage":6915,"date":8961,"body":8962,"category":769,"tags":8963},[1454],"2023-12-13","While Atlassian Server is closing in on [end of life](https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/) (February 15), GitLab is expanding the capabilities of its self-managed DevSecOps platform to fully support the needs of the public sector. 
GitLab’s [Agile Planning and Delivery features](https://about.gitlab.com/solutions/agile-delivery/) help public sector teams consolidate their various Atlassian tools into GitLab’s comprehensive DevSecOps platform. With GitLab, everyone can truly collaborate and efficiently deliver value and consistent quality throughout the DevSecOps lifecycle, including planning, source code management, and continuous integration and delivery – all wrapped with application security and compliance.\n\n\"GitLab self-managed is not only something we offer, but it is important to us. It's a path we are going to continue to support with new functionality and a strong security SLA for critical and high vulnerabilities,\" says Joel Krooswyk, GitLab Federal CTO.\n\nGitLab is [trusted across the public sector](https://about.gitlab.com/solutions/public-sector/) in federal civilian agencies, all branches of the U.S. Department of Defense, the intelligence community, state and local governments, and many government contractors and system integrators due to its best-in-class DevSecOps features. GitLab’s application security and compliance features help public sector organizations meet increasing requirements to secure their software factories and supply chains when developing and delivering solutions to their customers.\n\nHere are some of the benefits of the GitLab DevSecOps Platform for the public sector.\n\n## Migrations your way\n\nGitLab can help organizations expedite their migration from Atlassian Server to a self-managed instance of the DevSecOps platform. Connectivity between Atlassian and GitLab is built into the GitLab platform to automate and simplify data transfer from Jira, Bitbucket, Bamboo, and more. 
Also, because the process is not a wholesale \"lift and shift,\" organizations can carry out their migration at their own pace, even running the Atlassian and GitLab platforms in parallel, if necessary.\n\n\"Customers can stage their migrations in the way that works best for them, moving teams over in a careful, phased approach. And, if you have a program that is ending soon, you don't have to include it in the migration at all,\" Krooswyk says.\n\nThese guides will help ease the migration from Atlassian to GitLab:\n- [Jira-to-GitLab migration](https://about.gitlab.com/blog/tips-for-a-successful-jira-to-gitlab-migration/)\n- [Bitbucket Server-to-GitLab migration](https://docs.gitlab.com/ee/user/project/import/bitbucket_server.html)\n- [Bamboo-to-GitLab CI/CD migration](https://about.gitlab.com/blog/migrating-from-bamboo-to-gitlab-cicd/)\n\n## Process-neutral planning\n\nGitLab is process-neutral; Agile features can be leveraged to utilize a variety of processes from simple to complex, such as Scaled Agile, on a project-by-project basis. Public sector teams can tailor their work to best meet their needs, while also using [group-defined labels](https://docs.gitlab.com/ee/user/project/labels.html#view-group-labels) to roll information up to management [group-level boards](https://docs.gitlab.com/ee/user/project/issue_board.html#group-issue-boards) and [analytics](https://docs.gitlab.com/ee/user/analytics/analytics_dashboards.html) to track progress across their organization.\n\n## Reduced administration burden\n\nPublic sector organizations can reduce their administrative burden in numerous ways with GitLab, from simplifying procurement processes to streamlining DevSecOps toolchains.\n\nFor instance, if an organization has a conglomerate of boutique contractors all under a single umbrella, they can consolidate licensing into a single purchase and achieve cost savings. 
They also can eliminate fragile and complex DIY toolchains that impede collaboration and innovation by [consolidating onto a single DevSecOps platform](https://about.gitlab.com/the-source/platform/devops-teams-want-to-shake-off-diy-toolchains-a-platform-is-the-answer/).\n\n## Support for security automation and strong SLAs\n\nGitLab features security automation and governance at scale at every step of the DevSecOps lifecycle. Public sector organizations can practice defense in depth and set granular policies and rules that automate compliance, ensuring a secure software supply chain. Developers can use security automation to minimize manual repetitive tasks so they can focus on deep, value-generating work. At the same time, GitLab's governance guardrails assure security teams that developers are following best practices across the entire company.\n\nFor instance, [required merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/#required-approvals) for [protected branches](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html#approvals-for-protected-branches) and approval rules enable an organization to [support zero trust in the DevSecOps lifecycle](https://about.gitlab.com/blog/why-devops-and-zero-trust-go-together/) by [defining code owners for reviews](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html#code-owners-as-eligible-approvers). 
Approval rules also can call out when [application security scanning](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html#security-approvals) or [license scanning](https://docs.gitlab.com/ee/user/compliance/license_approval_policies.html) finds a vulnerability or license that needs additional team members with that expertise to join the review and approval process.\n\nGovernment customers can choose to deploy the GitLab DevSecOps Platform as [a single, hardened application](https://about.gitlab.com/press/releases/2020-07-01-gitlab-announces-hardened-container-image-in-support-of-the-us-department-of-defense-enterprise-devsecops-initiative/) that simplifies end-to-end visibility and traceability.\n\nWith GitLab, security and compliance policies are managed and enforced consistently across an organization's DevSecOps processes. GitLab has worked closely with government customers to ensure that the [platform operates in a fully offline environment](https://docs.gitlab.com/ee/topics/offline/) to support the development needs of sensitive programs related to national security.\n\nGitLab's [vulnerability remediation timelines or SLAs](https://handbook.gitlab.com/handbook/security/threat-management/vulnerability-management/#remediation-slas) are based on many factors, such as regulatory compliance, customer SLOs and SLAs, vulnerability impact, scope, prevalence in GitLab environments, impact if exploited, and defining reasonable turn-around times for mitigation and remediation to protect GitLab and its customers. All of these factors will be considered when mapping the priority to [GitLab’s priority labels](https://docs.gitlab.com/ee/development/labels/index.html#priority-labels). 
All components in scope of vulnerability management are subject to the same SLAs.\n\n## Compliance across the entire software supply chain\n\nSecuring the whole software supply chain is about more than just the platform being compliant; GitLab  also helps the public sector develop and deliver compliant software to their own customers. For instance, GitLab supports [NIST SSDF](https://trust.gitlab.com/) guidance right out of the box, including the ability to generate a software bill of materials ([SBOM](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/)).\n\nGitLab also has critical features to keep the software supply chain secure such as [continuous vulnerability scanning](https://docs.gitlab.com/ee/user/application_security/continuous_vulnerability_scanning/), which can detect new vulnerabilities outside of an organization's pipeline and in the latest CycloneDX SBOM reports for the default branch. The [compliance dashboard](https://docs.gitlab.com/ee/user/compliance/compliance_center/) enables public sector organizations to report on and manage standards adherence, violations, and compliance frameworks for groups.\n\nControlling access is a key aspect of compliance and GitLab gives public sector organizations complete control over who has access to their development environment. Following zero trust principles, GitLab supports role-based permissions [out of the box](https://docs.gitlab.com/ee/user/permissions.html) as well as [custom roles](https://docs.gitlab.com/ee/user/custom_roles.html).\n\n> Find out [how Lockheed Martin used GitLab's compliance framework](https://about.gitlab.com/customers/lockheed-martin/) to enforce software quality and automation to make releases and dependency management more efficient.\n\n## Pipeline best practices everyone can access\n\nWith GitLab, organizations can easily share knowledge internally and across agencies by leveraging [innersourcing](https://about.gitlab.com/topics/version-control/what-is-innersource/). 
Teams can centralize their best practices around CI/CD and enable sharing of pipeline processes, including integrations with other tools, that have already been approved.\n\n\"As pipeline improvements are made, they are contributed back to that shared CI/CD knowledge,\" Krooswyk says.\n\nFor instance, public sector organizations can use compliance frameworks to describe the type of compliance requirements projects must follow and compliance pipelines to define a pipeline configuration to run for any projects with a given compliance framework. Teams can also [create CI/CD templates](https://docs.gitlab.com/ee/ci/examples/#adding-templates-to-your-gitlab-installation) to accelerate new development projects.\n\nCustomers also can build a CI component catalog to make reusing pipeline configurations easier and more efficient. Users can discover and collaborate on pipeline constructs so that they can be evolved and improved over time.\n\n> Get an [introduction to GitLab CI component catalogs](https://about.gitlab.com/blog/introducing-ci-components/) and how to best use them.\n\n## Support for cloud-neutral environments\n\nThe public sector has a mandate to remain cloud-neutral. Because GitLab isn’t commercially tied to any specific cloud provider, organizations can de-risk their multi-cloud strategy and avoid being locked into a single vendor. The DevSecOps platform also is designed to meet the unique needs of cloud-native applications and the infrastructure upon which they rely.\n\n## Visibility across the software development lifecycle\n\nA key aspect of succeeding at DevSecOps is visibility — it's necessary to measure, monitor, and make decisions. GitLab's [dashboarding and visualization](https://about.gitlab.com/solutions/value-stream-management/) features help organizations to leverage people, processes, and technology to create value-stream-driven software development and go from idea to customer value with the fastest cycle time possible. 
With one unified data store, teams on GitLab can measure efficiency, productivity, and other key metrics in one place, including [applying industry-standard DORA metrics](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html). Get a holistic view of everything from DevOps adoption to developer productivity, vulnerability detection, software quality, innovation, and more.\n\n## Experienced professionals at the ready\nGitLab's professional services team has extensive experience in the public sector and understands your particular requirements. If you have multiple services, servers, and programs you need to migrate, we will help you plan that out.\n\n> Ready to migrate to GitLab? [Contact our sales team](https://about.gitlab.com/solutions/public-sector/) to start a conversation today.\n",[2243,9,1477,855,184],{"slug":8965,"featured":6,"template":686},"why-gitlab-self-managed-is-the-perfect-partner-for-the-public-sector","content:en-us:blog:why-gitlab-self-managed-is-the-perfect-partner-for-the-public-sector.yml","Why Gitlab Self Managed Is The Perfect Partner For The Public Sector","en-us/blog/why-gitlab-self-managed-is-the-perfect-partner-for-the-public-sector.yml","en-us/blog/why-gitlab-self-managed-is-the-perfect-partner-for-the-public-sector",{"_path":8971,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8972,"content":8977,"config":8982,"_id":8984,"_type":14,"title":8985,"_source":16,"_file":8986,"_stem":8987,"_extension":19},"/en-us/blog/why-improving-continuously-speeds-up-delivery",{"title":8973,"description":8974,"ogTitle":8973,"ogDescription":8974,"noIndex":6,"ogImage":1193,"ogUrl":8975,"ogSiteName":670,"ogType":671,"canonicalUrls":8975,"schema":8976},"Why improving continuously speeds up delivery","How do you keep pace with rapid changes in technology? 
The answer is continuous improvement.","https://about.gitlab.com/blog/why-improving-continuously-speeds-up-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why improving continuously speeds up delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jeremiah\"}],\n        \"datePublished\": \"2019-04-09\",\n      }",{"title":8973,"description":8974,"authors":8978,"heroImage":1193,"date":8979,"body":8980,"category":679,"tags":8981},[1198],"2019-04-09","\n\nI just finished Tom Friedman’s latest book “[Thank You for Being Late: An\nOptimist's Guide to Thriving in the Age of Accelerations](https://www.amazon.com/dp/B01F1Z0QHA),”\nin which he explores how our world is accelerating and everything is happening\nfaster and faster.  He explores the impact on business, society, economy, and\nenvironment. It’s a fantastic read – at times sobering and others exciting. I\nthink a fundamental takeaway from his research is that, from now on, business\nleaders must learn how to transform their organizations to operate at faster cycle\ntimes than ever before. While that sounds great, the obvious question is: How?\n\n## Operational efficiency and speed\n\nOne of the classic business books on operational efficiency and speed is Dr. Eli\nGoldratt’s classic, [“The Goal”](https://www.amazon.com/gp/product/0884271951).\nIn “The Goal,” the main character, Alex is a plant manager responsible for turning\naround a failing manufacturing plant. He learns a valuable lesson from his son’s\nscouting troop on a camping trip. As the group hikes into the woods, they spread\nout, because the slower hikers can’t keep up with the faster ones. No matter what\nAlex tries, he can't seem to keep them together. Then, he makes a small adjustment\nthat changes everything. He puts the slowest hiker in the front so that the entire\ntroop moves along at the speed of the slowest hiker. 
It’s the same in your\ndevelopment lifecycle: The fastest you can go depends on the most time-consuming\nstep in the [end-to-end value stream](/solutions/value-stream-management/).\n\nSo, how do you identify the most time-consuming step in your value stream? This\ndaunting task can be accomplished by adopting DevOps practices. In\n[“The Phoenix Project”](https://www.amazon.com/Phoenix-Project-DevOps-Helping-Business/dp/0988262592)\nand subsequent blog posts, Gene Kim describes the\n[“Three Ways”](https://itrevolution.com/the-three-ways-principles-underpinning-devops/)\nfrom which all DevOps patterns arise. These philosophies boil DevOps down to a set\nof three principles that can help organizations increase efficiency and speed by\ncarefully examining the value stream:\n\n1. **The First Way: Systems Thinking** – This first way is a flow of value from the business to the customer – or from Dev to Ops.\n1. **The Second Way: Amplify Feedback Loops** – The second way is to gather feedback from the customer, the business – or from Ops back to Dev.\n1. **The Third Way: Culture of Continual Experimentation and Learning** – Think of the third way as many smaller feedback loops of learning and improvement.\n\nWhat Alex learned in “The Goal” is an important lesson to remember: No matter\nwhat you change, you can only go as fast as the slowest. The same is true in your\nvalue stream. The principles of continuous improvement, exemplified by Gene’s\nThree Ways and [Kaizen](https://en.wikipedia.org/wiki/Kaizen) can be a powerful\nforce to help drive incremental and lasting change.\n\n## Continuous improvement through small changes\n\nWhy should you adopt a Kaizen approach?  Because it works. Kaizen is a strategy\nthat refers to continuous improvement through small changes that result in major\nimprovement. 
When applied in a business setting, Kaizen has significant impact\non culture, productivity, and quality.\n\nWhen teams practice continuous improvement, they;\n\n- Start with understanding their value stream.\n- Look for bottlenecks and waste.\n- Prioritize what to improve (remember the hikers).\n- Experiment with a minor change and learn.\n\nIn principle, continuous improvement and [DevOps isn’t difficult](/topics/devops/), if you approach\nit from a perspective of Kaizen and Gene Kim’s “Three Ways.” However, the\ncomplexity of fragmented toolchains and processes, siloed incentives, and lack\nof collaboration often get in the way of making lasting improvements in software\ndelivery.\n\n## Increase your DevOps success and reduce cycle time\n\nTo set the speed in the competitive race of software innovation, I have three suggestions:\n\n1. **Simplify your scope.** Focusing improvement efforts on one specific value \nstream at a time narrows your efforts to hone in on major problem areas rather\nthan becoming overwhelmed.\n1. **Empower your team.** Giving your delivery team the authority to experiment and\nimprove enables innovation to become a focus.   \n1. **Measure your value stream.** Understanding your cycle time and identifying \nbottlenecks enables you to take an objective look at what's slowing you down.\n\nIncreasing your DevOps success and reducing cycle time through continuous\nimprovement can help your organization continuously improve your value stream.\nAt GitLab, we’re helping teams reduce cycle time with our approach to DevOps,\nwhich unifies teams to focus on delivering value.\n\nAre you ready to reduce cycle\ntime? 
[Just commit.](/blog/strategies-to-reduce-cycle-times/)\n{: .alert .alert-gitlab-purple .text-center}\n",[9,683],{"slug":8983,"featured":6,"template":686},"why-improving-continuously-speeds-up-delivery","content:en-us:blog:why-improving-continuously-speeds-up-delivery.yml","Why Improving Continuously Speeds Up Delivery","en-us/blog/why-improving-continuously-speeds-up-delivery.yml","en-us/blog/why-improving-continuously-speeds-up-delivery",{"_path":8989,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":8990,"content":8995,"config":9000,"_id":9002,"_type":14,"title":9003,"_source":16,"_file":9004,"_stem":9005,"_extension":19},"/en-us/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen",{"title":8991,"description":8992,"ogTitle":8991,"ogDescription":8992,"noIndex":6,"ogImage":1801,"ogUrl":8993,"ogSiteName":670,"ogType":671,"canonicalUrls":8993,"schema":8994},"Why software developer job satisfaction matters and how to make it happen","Science has proven happier developers are more productive. It’s time to take software developer job satisfaction seriously – here’s how the right combo of culture and tools, i.e., a DevOps platform, can help.","https://about.gitlab.com/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why software developer job satisfaction matters and how to make it happen\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2021-05-13\",\n      }",{"title":8991,"description":8992,"authors":8996,"heroImage":1801,"date":8997,"body":8998,"category":679,"tags":8999},[851],"2021-05-13","\nIn the midst of a global pandemic and an ongoing worldwide shortage of coders, software developer job satisfaction has never been more important. 
But to managers, and their teams, happiness can certainly feel elusive, hard-to-measure, and difficult to achieve.\n\nBut there’s no question it’s a worthwhile goal, and you don’t have to look further than science for proof of that. Two years ago authors Daniel Graziotin and Fabian Fagerholm [studied more than 1300 developers](https://link.springer.com/chapter/10.1007/978-1-4842-4221-6_10) to rate their happiness, assess factors that make them unhappy, and to see if software developer job satisfaction was truly linked to improved productivity. The duo used the Scale of Positive and Negative Experience (SPANE) and their results were published in [_Rethinking Productivity in Software Engineering_](https://link.springer.com/book/10.1007/978-1-4842-4221-6).\n\nTheir findings were surprisingly straightforward: Coders were a \"moderately happy\" group, as a whole, and were made unhappy by three primary things: being stuck while problem solving, time pressure, and working with bad code or with poor coding processes. A fourth reason related to information overload. \"(The)...current software tools may overload developers with information,\" the study found. The research went on to outline how unhappy developers were less productive, suffered from \"broken flow,\" had less motivation, and produced low quality code. And finally, after two different psychological tests done in labs, the authors were able to declare definitively that \"happy software developers are indeed more productive.\"\n\n## Get happy, but how?\n\nNow that science has validated what we *felt* had to be true all along, it’s time to step back and consider the factors that play into software developer job satisfaction.\n\nA good place to start is with the development process. In our [2021 Global DevSecOps Survey](/developer-survey/), we found almost 36% of respondents said their teams are doing DevOps or DevSecOps, up from 27% in 2020. 
And there’s a reason why DevOps is so popular: it’s not only most likely to yield better code quality and faster time to market but it also adds to developer job satisfaction. In fact, more than 13% of respondents said [DevOps](/topics/devops/) makes developers  happier or makes their team more attractive to potential new employees.\n\nBut one of the realities of DevOps is tools...lots of them. In our survey, 38% of respondents used five tool chains while nearly 28% used between five and 10 (and 56% said there were an average of five tools on each tool chain.) Five tool chains with five tools each means teams are dealing with 25 tools – that’s certainly building a case for information overload and potentially *very unhappy* developers.\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\n## The beauty of less\n\nSo if DevOps, streamlined, is the key to software developer job satisfaction, the answer is obvious: Adopting a DevOps platform that brings tools together in a single application for collaboration, visibility, and development velocity makes for happier devs.\n\nOur survey respondents seemed to agree. When we asked about the benefits of a DevOps platform, the answers were clear: Better DevOps overall, improved collaboration, easier automation, and visibility/traceability. Here’s what they said:\n\n_\"Reduced mean time to recovery (MTTR), quicker time to market, reduced lead time for fixes, and fewer change failures.\"_\n\n_\"More ownership of everything to do with the product.\"_\n\n_\"Reliability, repeatability, consistency, productivity.\"_\n\nIf it’s time for more efficient DevOps (and of course happier developers), take our quiz to understand your level of DevOps platform maturity. 
And if you want to understand the heavy toll too many tools can take on your team, dive into [how to avoid the DevOps tax](/topics/devops/use-devops-platform-to-avoid-devops-tax/).\n",[681,9,267],{"slug":9001,"featured":6,"template":686},"why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen","content:en-us:blog:why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen.yml","Why Software Developer Job Satisfaction Matters And How To Make It Happen","en-us/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen.yml","en-us/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen",{"_path":9007,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":9008,"content":9014,"config":9019,"_id":9021,"_type":14,"title":9022,"_source":16,"_file":9023,"_stem":9024,"_extension":19},"/en-us/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops",{"title":9009,"description":9010,"ogTitle":9009,"ogDescription":9010,"noIndex":6,"ogImage":9011,"ogUrl":9012,"ogSiteName":670,"ogType":671,"canonicalUrls":9012,"schema":9013},"Why the market is moving to a platform approach to DevSecOps","A single DevOps platform improves ROI, the developer experience, and customer retention and satisfaction.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667886/Blog/Hero%20Images/cobolshortage.jpg","https://about.gitlab.com/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why the market is moving to a platform approach to DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-10-24\",\n      }",{"title":9009,"description":9010,"authors":9015,"heroImage":9011,"date":9016,"body":9017,"category":769,"tags":9018},[745],"2022-10-24","The market is moving to a platform approach to 
[DevSecOps](/topics/devsecops/). What had previously been a process that let different engineering teams adopt their own tools for different stages of the software development lifecycle – what we call “DIY DevOps” – is being replaced by a method that leverages a single application.\n\nWhy is this happening? First, IT managers are coming to grips with the inefficiencies and cost of toolchain sprawl. Second, executives are relying on digital transformation to solve significant business-level problems: improving developer onboarding and productivity, building high-performing teams, securing the software supply chain, and creating a secure on-ramp to the public cloud. Finally, there’s the impact of [the potential recession](https://www.worldbank.org/en/news/press-release/2022/09/15/risk-of-global-recession-in-2023-rises-amid-simultaneous-rate-hikes), which has accelerated the above trends.\n\nWe recently commissioned a [Forrester Consulting “Total Economic Impact™ of GitLab’s Ultimate Plan” study](https://page.gitlab.com/resources-study-forrester-tei-gitlab-ultimate.html) to better understand how companies save on costs and achieve business and technology goals with GitLab. We focused on our Ultimate tier, which is the fastest growing part of the business. We believe the results align with the business requirements needed to endure economic headwinds and position companies for success: strong return on technology investment, cost savings through technical tool consolidation, a faster pace of application releases to acquire and retain customers, greater development and delivery efficiency, increased and simplified security, and a rapid payback period. \n\nGitLab’s DevOps platform enables source code management, continuous integration/continuous delivery, advanced security capabilities, and more in a single application. 
The Forrester study found that combination led to:\n\n* Three-year ROI of 427%\n* 12x increase in the number of annual releases for revenue generation applications\n* 87% improvement in development and delivery efficiency time\n* Less than six-month payback period\n\n## Understanding DevOps pain points\n\nTo realize the benefits of a single DevOps platform, organizations have to assess their pain points. Here are some common development lifecycle obstacles that affect organizations of all sizes:\n\n* Complex toolchains and processes\n* Inefficient development environments\n* Lack of security skills\n* Rushed development cycles\n* No single source of truth or single code repository\n* Poor software testing practices\n\nAll of these pain points can impede an organization’s ability to manage through a recession and recovery. \n\n## The benefits of a DevOps platform\n\nThe Forrester study found that GitLab Ultimate provided a composite organization, based on interviewed customers, 10 key quantified benefits over a three-year period. While each benefit on its own could have a positive impact on a business’s ability to stay steady and even thrive during difficult economic times, together they are a powerhouse that can eliminate many pain points.\n\nHere are five of those benefits of the GitLab Ultimate Plan:\n\n### Vulnerability management\n\nAs GitLab’s 2022 Global DevSecOps Survey found, [security is top of mind](/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment/) for all DevOps organizations. Yet security at scale can be challenging, especially finding and hiring professionals with the right skills.\n\nA benefit of GitLab Ultimate, according to the Forrester study, is greater efficiency in managing vulnerabilities. The DevOps platform [integrates and automates vulnerability management](/direction/govern/threat_insights/vulnerability_management/) within the development lifecycle. 
Issues can be identified, logged, triaged, tracked, and remediated – all in the same DevOps application. Developers can address vulnerabilities in real time, avoiding release delays or software defects and bugs. According to Forrester, the composite organization realized savings of “hours a week because developers have access to better context about the vulnerabilities. This in turn means less back and forth between development and QA/security on an issue.”\n\n### Less homegrown tool development/open source solution management\n\nDevOps teams often spend a considerable amount of time creating tools they need from scratch or finding and managing open source options. GitLab reduces [toolchain complexity (a.k.a. debt)](/blog/battling-toolchain-technical-debt/) by building into the platform the tools and features developers need, enabling them to manage their environment as a single application. GitLab Ultimate enabled the Forrester study’s composite organization to shift “from manually intensive tasks requiring the full attention of the developer, security, and operations teams to an environment where they now spend no more than a few hours per day per person on the same tasks.”\n\n### Efficient development\n\nA highly efficient development process impacts the developer experience, which improves retention. GitLab Ultimate enabled the composite organization to develop code faster, deliver higher quality code, enable better collaboration, and improve the ability to monitor applications, according to the Forrester study. Other advantages include: more streamlined processes, better efficiency among developers and non-technical teammates, and improved visibility and collaboration across the SDLC.\n\n### Better code quality\n\nPoor code quality directly affects a company’s ability to attract and retain customers. 
GitLab enabled the composite organization to have “a single application that streamlines processes to ensure code is tested, scanned, and verified before it is released,” according to the Forrester study. The result is high-quality code (with reduced defects and bugs) that meets security standards.\n\n### More releases, faster\n\nOrganizations want to be able to address customer needs for newer applications, updates, and enhanced feature sets in a timely fashion. With GitLab, the composite organization can “increase the velocity of updates and releases, allowing it to meet customers’ rising digital demands.”\n\nDevOps brought about the following unquantified benefits for the composite organization, according to the Forrester study: more satisfied employees because they are more productive and collaborative; more satisfied customers because of a smoother project workflow, improved release quality, and a faster release frequency; and improved market innovation and competitiveness due to faster development lifecycle and time to market.\n\nWhile DevOps platform benefits are applicable to any economic environment, they are even more so in this time of economic uncertainty. 
GitLab enables organizations to extract the most out of their DevOps environment and achieve faster, higher quality, and more secure development and release cycles.\n\n> Download the full [Forrester Consulting “Total Economic Impact of GitLab’s Ultimate Plan” study](https://page.gitlab.com/resources-study-forrester-tei-gitlab-ultimate.html) for:\n\n* Additional benefits of GitLab Ultimate Plan\n* Testimonials from GitLab customers Forrester interviewed\n* Assumptions and risks to calculate ROI",[9,2981,1040],{"slug":9020,"featured":6,"template":686},"why-the-market-is-moving-to-a-platform-approach-to-devsecops","content:en-us:blog:why-the-market-is-moving-to-a-platform-approach-to-devsecops.yml","Why The Market Is Moving To A Platform Approach To Devsecops","en-us/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops.yml","en-us/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops",{"_path":9026,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":9027,"content":9033,"config":9037,"_id":9039,"_type":14,"title":9040,"_source":16,"_file":9041,"_stem":9042,"_extension":19},"/en-us/blog/why-were-sticking-with-ruby-on-rails",{"title":9028,"description":9029,"ogTitle":9028,"ogDescription":9029,"noIndex":6,"ogImage":9030,"ogUrl":9031,"ogSiteName":670,"ogType":671,"canonicalUrls":9031,"schema":9032},"Why we're sticking with Ruby on Rails","GitLab CEO and co-founder Sid Sijbrandij makes the case for Ruby on Rails.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668296/Blog/Hero%20Images/gitlab-ruby.jpg","https://about.gitlab.com/blog/why-were-sticking-with-ruby-on-rails","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we're sticking with Ruby on Rails\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2022-07-06\",\n      
}",{"title":9028,"description":9029,"authors":9034,"heroImage":9030,"date":6452,"body":9035,"category":769,"tags":9036},[1609],"\nWhen David Heinemeier Hansson created Ruby on Rails ([interview](https://corecursive.com/045-david-heinemeier-hansson-software-contrarian/)), he was guided by his experience with both PHP and Java. On the one hand, he didn’t like the way the verbosity and rigidness of Java made Java web frameworks complex and difficult to use, but appreciated their structural integrity. On the other hand, he loved the initial approachability of PHP, but was less fond of the quagmires that such projects tended to turn into.\n\n![Ruby vs. Java](https://about.gitlab.com/images/blogimages/ruby1.png)\n\nIt seems like these are exclusive choices: You either get approachable and messy or well-structured and hard to use, pick your poison. We used to make a very similar, and similarly hard, distinction between server-class operating systems such as Unix, which were stable but hard to use, and client operating systems such as Windows and MacOS that were approachable but crashed a lot.\n\nEveryone accepted this dichotomy as God-given until NeXT put a beautiful, approachable and buttery-smooth GUI on top of a solid Unix base. 
Nowadays, “server-class” Unix runs not just beautiful GUI desktops, but also most phones and smart watches.\n\nSo it turned out that approachability and crashiness were not actually linked except by historical accident, and the same turns out to be true for approachability and messiness in web frameworks: They are independent axes.\n\n![approachability and messiness](https://about.gitlab.com/images/blogimages/ruby2.png)\n\nAnd these independent axes opened up a very desirable open spot in the lower right hand corner: an approachable, well-structured web framework.\nWith its solid, metaprogrammable Smalltalk heritage and good Unix integration, Ruby proved to be the perfect vehicle for DHH to fill that desirable bottom right corner of the table with Rails: an extremely approachable, productive and well-structured web framework. \n\n![a well-structured framework](https://about.gitlab.com/images/blogimages/ruby3.png)\n \nWhen GitLab co-founder Dmitriy Zaporozhets decided he wanted to work on software for running his (and your) version control server, he also came from a PHP background. But instead of sticking with the familiar, he chose Rails. Dmitry's choice may have been prescient or fortuitous, but it has served GitLab extremely well, in part because David succeeded in achieving his goals for Rails: approachability with good architecture.\n\n## Why modular?\n\nIn the preceding section, it was assumed as a given that modularity is a desirable property, but as we also saw it is dangerous to just assume things.  So why, and in what contexts, is modularity actually desirable?\n\nIn his 1971 paper [\"On the Criteria to be Used in Decomposing Systems into Modules\"](https://prl.ccs.neu.edu/img/p-tr-1971.pdf), David L. 
Parnas gave the following (desired) benefits of a modular system:\n\n- Development time should “be shortened because separate groups would work on each module with little need for communication.”\n- It should be possible to make “drastic changes or improvements in one module without changing others.”\n- It should be possible to study the system one module at a time.\n\nThe importance of reducing the need for communication was later highlighted by Fred Brooks in _[The Mythical Man Month](https://en.wikipedia.org/wiki/The_Mythical_Man-Month)_, with the additional communication overhead one of the primary reasons for the old saying that \"adding people to a late software project makes it later.\" \n\n## We don’t need microservices\n\nModularity has generally been as elusive as it is highly sought after, with the default architecture of most systems being the [Big Ball of Mud](http://laputan.org/mud/). It is therefore understandable that designers took inspiration from arguably the largest software system in existence: the World Wide Web, which is modular by necessity, it cannot function any other way.\n\nOrganizing your local software systems using separate processes, microservices that are combined using [REST](https://www.ics.uci.edu/~fielding/pubs/dissertation/fielding_dissertation.pdf) architectural style does help enforce module boundaries, via the operating system, but comes at significant costs. It is a very heavy-handed approach for achieving modularity.\n\nThe difficulties and costs of running what is now a gratuitously distributed system are significant, with some of the performance and reliability issues documented in the well-known [fallacies of distributed computing](https://en.wikipedia.org/wiki/Fallacies_of_distributed_computing). In short, the performance and reliability costs are significant, as function calls that take nanoseconds and never fail are replaced with network ops that are three to six orders of magnitude slower and do fail. 
Failures become much harder to diagnose if they must be traced across multiple services with very little tooling support.\nYou need a fairly sophisticated DevOps organization to successfully run microservices. This doesn't really make a difference if you run at a scale that requires that sophistication anyhow, but it is very likely that [you are not Google](https://blog.bradfieldcs.com/you-are-not-google-84912cf44afb?gi=1b82f8ef279a).\n\nBut even if you think you can manage all that, it is important to note that all this accidental complexity is on top of the original essential complexity of your problem, microservices do nothing to reduce complexity. And even the hoped-for modularity improvements are not in the least guaranteed, typically what happens instead is that you get a [distributed ball of mud](http://www.codingthearchitecture.com/2014/07/06/distributed_big_balls_of_mud.html).\n\n## Monorails\n\nBy making good architecture approachable and productive, Rails has allowed GitLab to develop a [modular monolith](https://medium.com/@dan_manges/the-modular-monolith-rails-architecture-fb1023826fc4). A modular monolith is the exact opposite of a distributed ball of mud: a well-structured, well-architected, highly modular program that runs as a single process and is as [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions) as possible.\n\nAlthough structuring GitLab as a monolith has been extremely beneficial for us, we are not dogmatic about that structure. Architecture follows needs, not the other way around. And while Rails is excellent technology for our purposes, it does have a few drawbacks, one of them being performance. Luckily, only a tiny part of most codebases is actually performance critical. 
We use our own [gitaly](https://www.google.com/url?q=https://docs.gitlab.com/ee/administration/gitaly/&sa=D&source=docs&ust=1656441057979077&usg=AOvVaw11r4iMGjvs6PrtTJEkeTbO) daemon written in Go to handle actual git operations, and [PostgreSQL](https://thenewstack.io/two-sizes-fit-most-postgresql-and-clickhouse/) for non-repository persistence.\n\n## Open Core\n\nLast but not least, our modular monolith turns [our](/blog/gitlab-is-open-core-github-is-closed-source/) [Open Core](https://en.wikipedia.org/wiki/Open-core_model) business model from being just a nice theory into a practical [reality](https://www.cnbc.com/2021/10/14/gitlab-jumps-in-nasdaq-debut-after-pricing-ipo-above-expected-range.html). Although Rails does not accomplish this by itself, that would be our wonderful contributors and engineers, it does lay the proper foundations.\n\nIn order to reap the true [benefits](https://en.wikipedia.org/wiki/The_Cathedral_and_the_Bazaar) of open source, the source code that is made available must be approachable for contributors. In order to maintain architectural integrity in the face of contributions from a wide variety of sources, and to keep a clear demarcation line between the open and closed components, the code must be very well structured. Sound familiar?\n\nWouldn’t it be better to have a proper plugin interface? Or better yet, a services interface modeled on microservices? In a word: no. Not only do these approaches impose deployment and integration hurdles that go far beyond “I made a small change to the source code,\" they often enforce architectural constraints too rigidly. 
Anticipating all the future extension points is a fool's errand, one that we luckily did not embark on, and do not have to.\n\nWith our boring modular monolith, users and other third-party developers can and do contribute enhancements to the core product, giving us tremendous leverage, coupled with an unbeatable pace and scalability of innovation.\n",[9,682,1515],{"slug":9038,"featured":6,"template":686},"why-were-sticking-with-ruby-on-rails","content:en-us:blog:why-were-sticking-with-ruby-on-rails.yml","Why Were Sticking With Ruby On Rails","en-us/blog/why-were-sticking-with-ruby-on-rails.yml","en-us/blog/why-were-sticking-with-ruby-on-rails",{"_path":9044,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":9045,"content":9050,"config":9055,"_id":9057,"_type":14,"title":9058,"_source":16,"_file":9059,"_stem":9060,"_extension":19},"/en-us/blog/working-with-performance-metrics",{"title":9046,"description":9047,"ogTitle":9046,"ogDescription":9047,"noIndex":6,"ogImage":4082,"ogUrl":9048,"ogSiteName":670,"ogType":671,"canonicalUrls":9048,"schema":9049},"How application performance monitoring metrics helps developers","Automatically detect and monitor Kubernetes Clusters and deployed applications from the GitLab interface with application performance metrics (APM).","https://about.gitlab.com/blog/working-with-performance-metrics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How application performance monitoring metrics helps developers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Saumya Upadhyaya\"},{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2020-05-07\",\n      }",{"title":9046,"description":9047,"authors":9051,"heroImage":4082,"date":9052,"body":9053,"category":679,"tags":9054},[2021,2022],"2020-05-07","\n[Application Performance Metrics](/direction/monitor/platform-insights/), also referred to as GitLab Metrics, is designed 
for developers who need to understand the impact of the changes they are making on performance, and DevOps engineers/operators who are tasked with keeping the production systems up and running. GitLab Metrics, which is at [viable maturity](/direction/maturity/#monitor), can automatically detect and monitor Kubernetes clusters deployed via GitLab. The GitLab Metrics tool can also monitor all of your custom application metrics so that you can see how your entire system is behaving and performing without leaving the familiar GitLab interface.\n\nGitLab has application performance monitoring tightly and automatically integrated into the DevOps process, which allows you to move seamlessly from development to production with confidence. GitLab Metrics is just one part of the [GitLab Monitoring solution](/direction/monitor/). When the whole suite of GitLab Monitoring tools is used together, we can help you decrease the frequency and severity of production incidents.\n\n## What’s under the hood?\n\nGitLab Metrics is powered by [Prometheus](https://prometheus.io/). Prometheus is quickly becoming the de facto standard for metrics for the cloud native community, because it rises to the top for monitoring Kubernetes and the available integrations cover the major elements of the cloud native ecosystem.\n\n## How to use GitLab Metrics?\n\nGitLab Metrics can be used in two ways.\n\nFirst, you can use Prometheus as a [managed application](https://docs.gitlab.com/ee/update/removals.html) within GitLab. 
Prometheus can be installed into your GitLab managed Kubernetes cluster with one click.\n\n![System Metrics](https://about.gitlab.com/images/blogimages/blog-metrics-system-metrics.png){: .shadow}\nHow the system metrics dashboard looks to users.\n{: .note.text-center}\n\nWhen integrated with Prometheus and Kubernetes, GitLab Metrics includes the following powerful capabilities:\n* [Default metrics](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html#getting-metrics-to-display-on-the-metrics-dashboard) collected from Prometheus, such as memory and core usage for the pod and canary deployment, Knative invocations, NGINX, AWS ELB, HA Proxy metrics, etc.\n* [Custom metrics](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html#adding-additional-metrics) can be configured with a promQL query.\n* [Alerts](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html#setting-up-alerts-for-prometheus-metrics) can be added on the UI directly for each metric.\n* Application deploys works by deploying to the monitored environment and can be [visualized on the metrics chart](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html#getting-metrics-to-display-on-the-metrics-dashboard) itself to correlate performance spikes due to deploys.\n* [Custom dashboards](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html#defining-custom-dashboards-per-project) can be configured as a YAML file and an existing GitLab default dashboard can be replicated as required.\n\n![Custom Dashboards](https://about.gitlab.com/images/blogimages/blog-metrics-key-services.png){: .shadow}\nUse a YAML file to configure a customized Metrics dashboard.\n{: .note.text-center}\n\nIf you already have an operational Prometheus instance that you would like to integrate with GitLab, you can simply point to the Prometheus server from within GitLab. 
In this case, performance metrics are retrieved from the external instance of Prometheus, and displayed within the GitLab interface.\n\n## How is GitLab dogfooding our metrics capability?\n\nAt GitLab, [dogfooding](https://handbook.gitlab.com/handbook/values/#dogfooding) is one of the main tenets of our [results](https://handbook.gitlab.com/handbook/values/#results) value.\n\nThe [GitLab infrastructure team](/handbook/engineering/infrastructure/) is used as an internal customer, and they provide feedback which feeds directly into how we develop our metrics capabilities. Prometheus and Grafana are two tools the GitLab infrastructure team uses. One of the main reasons the infrastructure team was reluctant to implement GitLab metrics was our previously inadequate graphing capabilities. To encourage our infrastructure team to dogfood metrics, we are focused on filling critical and non-critical gaps in GitLab metrics charts, which initiates a feedback loop for the product. Our goal is to eventually phase out Grafana and work exclusively with Prometheus and GitLab charts to monitor GitLab.com, we will do it on our GitLab way, iteratively, first we'll replace all of our publicly facing [dashboard](https://dashboards.gitlab.com/).\n\n## What's next for GitLab Metrics\n\nGet started by visiting the GitLab Metrics [documentation page](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html) and [directions page](/direction/monitor/platform-insights/). 
We’d love your help with prioritizing work on the most valuable improvements to the GitLab Metrics solution.\n\nTo report a bug or request a feature or enhancement, follow these steps:\n* Open an issue in the [GitLab project](https://gitlab.com/gitlab-org/gitlab/issues).\n* Describe the feature enhancement and, if possible, include examples.\n* Add these labels to the issue: `devops::monitor`, `Category::Metrics`\n* Tag @dhershkovitch on the issue\n\nCover image by [chuttersnap](https://unsplash.com/photos/gts_Eh4g1lk) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,916],{"slug":9056,"featured":6,"template":686},"working-with-performance-metrics","content:en-us:blog:working-with-performance-metrics.yml","Working With Performance Metrics","en-us/blog/working-with-performance-metrics.yml","en-us/blog/working-with-performance-metrics",{"_path":9062,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":9063,"content":9068,"config":9074,"_id":9076,"_type":14,"title":9077,"_source":16,"_file":9078,"_stem":9079,"_extension":19},"/en-us/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles",{"title":9064,"description":9065,"ogTitle":9064,"ogDescription":9065,"noIndex":6,"ogImage":2055,"ogUrl":9066,"ogSiteName":670,"ogType":671,"canonicalUrls":9066,"schema":9067},"How contributors earned full-time engineering roles at GitLab","As we continue to celebrate the 10th anniversary of the first commit to GitLab, here's a look at how two highly active community members became enthusiastic team members.","https://about.gitlab.com/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"You're hired! 
Two GitLab contributors turn their success into full-time engineering roles\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-11-12\",\n      }",{"title":9069,"description":9065,"authors":9070,"heroImage":2055,"date":9071,"body":9072,"category":769,"tags":9073},"You're hired! Two GitLab contributors turn their success into full-time engineering roles",[745],"2021-11-12","[Greg Myers](https://gitlab.com/greg) and [Rajendra Kadam](https://gitlab.com/rkadam3) have something beyond their engineering roles at GitLab in common – both started out as GitLab contributors. We wanted to share their stories as part of our celebration around the 10th anniversary of the first commit to GitLab.\n\nMyers, a GitLab Senior Support Engineer, says his contributions started in 2018, when he first found his passion for helping other community forum members. \n\n“Most of my early contributions involved helping people set up, configure, and troubleshoot self-hosted GitLab installations,” Myers says.\n\nHe enjoyed this helper role so much he applied for an engineering position, but failed the technical interview and didn’t receive an offer. “I kept contributing to GitLab and helping others in the forum while I leveled up in my weak areas,” he says.\n\nKadam, a GitLab Back-end Engineer and [GitLab hero](/community/heroes/members/), started contributing to GitLab in Jan 2020 to learn more about Ruby on Rails and apply it to his then-workplace. \n\n“I did not stop after that since it is more than the code. I loved working with people at GitLab and the culture, even though I was not a full-time team member,” Kadam says.\n\nLike Kadam, Myers enjoyed being a part of the GitLab community. “The majority of my ‘code’ contributions back then were quite simple – fixing typos and markdown formatting issues in documentation,” he says. 
“I'd never contributed to an open source project of this size and caliber, and I was impressed by how easy and smooth it was to get involved and contribute.”\n\nHe remembers feeling “star-struck” when GitLab co-founder Dmitriy Zaporozhets personally responded in the comments to one of his first MRs.\n\nUsing what he learned as a contributor, Kadam earned a promotion from his employer. He went on to participate in [GitLab hackathons](/community/hackathon/), winning three in a series. His prominence in the GitLab community led him to be offered and to accept an internal engineering role in February 2021. Kadam blogged about the journey from being a contributor to a team member [on Medium](https://rajendraak.medium.com/how-i-got-a-job-at-gitlab-a3515214b74b).\n\nMyers, meanwhile, feeling more confident about his skills, took another shot at a team member role. “After four months, I reapplied for the support engineer position, and this time I got the job. Now it is my job to help others with GitLab and contribute to GitLab, and I love what I do,” Myers says.\n\nAs a Developer Relations Support counterpart, he helps others in the GitLab community forum and advocates for the GitLab wider community. And, as a GitLab Open Source Support Liaison, “I give back to open source communities I know and love,” he says.\n\nHe encourages others to not only contribute to the GitLab community but to help other forum members as he did. After all, you never know where those contributions can lead. 
“Being a GitLab community member and contributor led me to my dream job,” he says.",[9,1515,728],{"slug":9075,"featured":6,"template":686},"you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles","content:en-us:blog:you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles.yml","You Are Hired Two Gitlab Contributors Turn Their Success Into Full Time Engineering Roles","en-us/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles.yml","en-us/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles",{"_path":9081,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":9082,"content":9088,"config":9095,"_id":9097,"_type":14,"title":9098,"_source":16,"_file":9099,"_stem":9100,"_extension":19},"/en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks",{"title":9083,"description":9084,"ogTitle":9083,"ogDescription":9084,"noIndex":6,"ogImage":9085,"ogUrl":9086,"ogSiteName":670,"ogType":671,"canonicalUrls":9086,"schema":9087},"Setting up 100 AWS Graviton Spot Runners for GitLab","Utilizing the GitLab HA Scaling Runner Vending Machine for AWS Automation to setup 100 GitLab runners on AWS Spot.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669882/Blog/Hero%20Images/hundredgitlabspotrunner.png","https://about.gitlab.com/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to provision 100 AWS Graviton GitLab Spot Runners in 10 Minutes for $2/hour\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"},{\"@type\":\"Person\",\"name\":\"Nupur Sharma\"}],\n        \"datePublished\": \"2021-08-17\",\n      }",{"title":9089,"description":9084,"authors":9090,"heroImage":9085,"date":9092,"body":9093,"category":791,"tags":9094},"How to provision 100 AWS 
Graviton GitLab Spot Runners in 10 Minutes for $2/hour",[1239,9091],"Nupur Sharma","2021-08-17","\n\nManaging elastically scaled or highly available compute infrastructures is one of the key challenges the cloud was built for. Application scaling concerns can be handled by cloud services that are purpose designed, rigorously tested, and continually improved. This article dives into some specific enablement automation that brings the benefits of AWS Autoscaling Groups (ASG) to runner management. There are benefits to both the largest fleets and single instance runners.\n\nEmbedded in this article is a YouTube video that demonstrates the deployment of 100 GitLab runners on Amazon EC2 Spot compute in less than 10 minutes using less than 10 clicks. The video also shows updating this entire fleet in under 10 minutes to emphasize the time savings of built-in maintenance.\n\nThe information and automation in this article applies to GitLab Private Runners which are deployed on your own compute resources. Self-managed GitLab instances require private runners, but they can also be configured and used with GitLab.com SaaS accounts.\n\n## Well-architected runner management\n\nThere are many different reasons that a customer might need to deploy multiple runners with various characteristics. Some of the more popular ones are:\n\n- Workloads that require large-scale runner fleets.\n- To gain cost savings through Spot compute, uptime scheduling, and ARM architecture.\n- Projects with high demand of CI activity to make sure that the runner is not being held up by jobs on another project.\n- Jobs that have special security requirements, e.g., security credentials, role-based access or managed identities for Continuous Delivery (CD). These security requirements can enable instance-level (AWS IAM Instance Profile) security by allowing runners with sufficient rights to deploy in specific target environments. 
For example, a CD runner for non-production environments and a different runner for production.\n- Implementing role-based access control rather than user-based. This means users don't have to use secrets to manage security requirements for CI jobs to accomplish their tasks.\n- Development teams can be confident the runner has the same capabilities for CI and CD automation they test through their interactive logins by leveraging a common IAM role.\n\n### The challenges of building production-grade elastic GitLab Runners\n\n[The GitLab Runner](https://docs.gitlab.com/runner/) is the workhorse of GitLab CI and CD capabilities. The runner can handle numerous operating environments and automation functions for a GitLab instance. The GitLab Runner has become very sophisticated due to the broad range of supported environments. In order to successfully configure the GitLab Runner as a set-it-and-forget-it service, the user has to work through many different decisions and considerations. We summarize some of the GitLab Runner-specific considerations that can be challenging:\n\n- There are a lot of configuration options and scenarios to sort through. It can be an iterative process to discover what needs to be done to set up GitLab Runners.\n- Ensuring runners are a production-grade capability requires Infrastructure as Code (IaC) development so that high availability and scaling can be achieved by automatically spawning new instances.\n- Ensuring that runner deregistration happens correctly when GitLab Runners are automatically scaled in.\n- Additional cost-saving configurations, such as Spot compute and scheduled runner uptime, can complicate the automation requirements for AWS Autoscaling Groups (ASGs).\n- Large organizations often want developers to be able to easily self-service deploy runners with various configurations. Service Management Automation (SMA) has been made popular with products like Service Now, AWS Service Catalog, and AWS Control Tower. 
This automation is compatible with SMA.\n- It can be difficult to map runners to AWS and map AWS to runners in large organizations with numerous runners and AWS accounts.\n\n### Introducing the GitLab HA Scaling Runner Vending Machine for AWS\n\nAn effective way to handle multiple design considerations is to make a reusable tool. To help you with best practice runner deployments on AWS, we created the [GitLab HA Scaling Runner Vending Machine for AWS](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/) (\"The GitLab Runner Vending Machine\"). It is created in AWS’ Infrastructure as Code, known as CloudFormation.\n\n> **Designed with AWS Well Architected:** This automation has many features beyond the scope of this blog post. The primary focus of this blog post is on managing costs. See the [full list of features here](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/FEATURES.md).\n\nThe GitLab Runner Vending Machine has the following cost management and scaling management benefits, exposed as a variety of parameters:\n\n- The ability to leverage Spot compute instances. This is important because it leaves CI/CD pipeline developers in charge of whether specific Gitlab CI/CD jobs run on Spot compute or not.\n- ASG-scheduled scaling so that a runner or runner fleet can be completely shutdown when not in use.\n- The GitLab Runner Vending Machine can leverage ARM compute for Linux - which runs faster and costs less.\n- It can also use ASG to update all runners in a fleet with the latest machine images and GitLab Runner version (or a specific version). 
When maintenance is not built-in, the labor cost of keeping things up-to-date can be significant.\n- Runner naming and tagging in AWS and GitLab, which eases the burden of locating runner instances and managing orphaned runner registrations, whether it is manual or automated.\n\n### How to save money with The GitLab Runner Vending Machine\n\nSignificant savings are possible with this IaC, whether your team wants to save on a single runner or a fleet of them.\n\nThe savings calculations below are for a single runner and should be linear for a given workload. To calculate your savings for more runners, simply multiply the final result by the number of runner instances. The available \"Runner Minutes\" per hour is calculated as the runner's job concurrency setting multiplied by the minutes in an hour. For this exercise, we'll use job concurrency of \"10\". This number should be changed depending on the instance types you are using and the load testing of your typical CI/CD workloads.\n\nJust like most performance analysis, we are assuming that hardware resource utilization is optimal and consistent. If a runner cluster can sustain respectable performance with 80% CPU loading, this calculation assumes that would be maintained regardless of the size of the cluster.\n\n#### AWS Graviton ARM and Spot savings\n\nThe GitLab Runner engineering team has completed performance testing that demonstrates performance gains of more than 30% on some AWS Graviton (ARM-based) instance types. Assuming that runners are performance-managed for optimized utilization, this gain is a direct cost savings. 
Just recently, we shared [how deploying GitLab on Arm-based AWS Graviton2 resulted in cost savings of 23% and 36% performance gains](/blog/achieving-23-cost-savings-and-36-performance-gain-using-gitlab-and-gitlab-runner-on-arm-neoverse-based-aws-graviton2-processor/).\n\n![ARM Efficiency Test Results For GitLab Runner](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image1.png)\nGitLab Runner testing results for ARM-efficiency gains.\n{: .note.text-center}\n\n#### Scheduling savings\n\nThe savings can be dramatic when teams are able to turn off runners when not in use. For instance: Scheduling a runner to operate for 40 hours per week saves 76% when compared to the cost of running it for 168 hours. Runners that are just in use for 10 hours per week save 94%.\n\n#### Combining scheduling, Spot, and ARM to save 97%\n\nJust for fun, let's see what savings are possible by comparing a standard runner scenario with deploying runners in customized, stand-alone instances to the maximum savings automation can deliver.\n\nImagine I am a developer who set up a custom GitLab Runner on an m5.xlarge instance, which is the x86 architecture, for a development team that works for 40 hours in the same time zone. Since there is no automation, the GitLab Runner runs 24/7. We will assume a job concurrency of 10, which gives 600 \"runner minutes\" per hour of run time. Scheduling uptime, running on Spot, and leveraging ARM can all be achieved quickly by redeploying the runner with The GitLab Runner Vending Machine.\n\nHere is the calculation to run the configuration described above, for one week: On Demand, x86, Always On: 1 x m5.xlarge = .192/hr x 168 hrs/week = **$32/week or $1664/year**\n\nHere are the savings that come from running Spot, ARM, and scheduling the Runner to be up just 40hrs/week: 1 x m6g.large Spot = .0419 x 40hrs/week x 64% (36% better performance) = **$1/week**\n\n$1/$32 x 100 = 3.125% of the original cost for the same work. 
In other words, **we just saved 97%** without ever impacting the ability to get the job done.\n\nIn short, The GitLab Runner Vending Machine intends to bring the many cost saving mechanisms of AWS Cloud computing to your GitLab Runner fleets.\n\nYou can save costs by using ARM/Graviton instances, Spot compute, or by scheduling uptime. In many cases, you can combine all three savings mechanisms for maximum impact.\n\n### Special pipeline building concerns for Spot Runners\n\nSpot instances can disappear with as little as two minutes of warning. This inevitably means some runners will be terminated while jobs are still in progress. CI/CD pipeline developers must take into account whether a job ought to run on compute resources that can disappear with short notice (so short as to be considered \"no notice\"). This comes down to deciding what jobs are OK to run on Spot and what jobs should instead run on AWS' persistent compute known as \"On-Demand\".\n\nThe GitLab Runner Vending Machine accounts for these constraints by tagging runner instances in GitLab with `computetype-spot` or `computetype-ondemand` – indicating in the \"tags\" segment of GitLab CI/CD jobs if a job should run on Spot compute.\n\nSome types of CI workloads, e.g., mass performance testing or large unit testing suites, may already have work queues and work tracking that make them ideal for Spot compute. Other activities, e.g., polling another system for a deployment status, could suffer a material discrepancy if terminated prematurely. Others, such as building the application, are sort of in the middle. Usually, restarting the build is sufficient.\n\n### Job configuration for Spot\n\nIf you need to reschedule terminated work, it is helpful to configure GitLab’s job `retry:` keyword. 
When working with a dispatching engine or work queue that automatically accounts for incomplete work by processing agents, the retry configuration is unnecessary.\n\nHere is an example that implements both of these concepts:\n\n```\nmy-scaled-test-suite:\n  parallel: 100\n  tags:\n  - computetype-Spot\n  retry:\n    max: 2\n    when:\n      - runner_system_failure\n      - unknown_failure\n```\n\nThe usage and limitations of `retry:` are discussed in greater detail in the [GitLab CI documentation on retry](https://docs.gitlab.com/ee/ci/yaml/#retry).\n\n### How to get started\n\nThe CloudFormation templates for the [GitLab Runner Vending Machine are managed in a public project on GitLab.com](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/). There is a lot of information in the project about how the solution works and what problems it aims to solve, and it will be useful for very experienced AWS builders.\n\nBut to keep it simple for users who want the quickest path to creating runners of all sizes, it also has an \"easy button\" page that has a table that looks like this:\n\n![Easy Button Page Sample](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image2.png)\nThe easy buttons launch a CloudFormation Quick Create that only requires filling in a few fields.\n{: .note.text-center}\n\nKeep in mind that easy buttons intentionally hide the high degree of customization that is possible with this automation by setting the parameters for the most common scenarios in advance. Advanced AWS users should read more of the documentation in the repository to understand that the GitLab Runner Vending Machine is also capable of creating sophisticated runner fleets.\n\nFirst, click the CloudFormation icons to launch the Easy Button template directly into the CloudFormation Quick Create console. 
The Quick Create console is designed for simplicity to enable you to complete the prompts and then click one button to launch the stack.\n\n![CloudFormation Quick Create Example](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image3.png){: .shadow.medium.center}\nThis is a typical Quick Create form for the GitLab Vending Machine easy buttons.\n{: .note.text-center}\n\nNext, select the deploy region by using the drop down menu in the upper right of the console (where the screenshot says \"Oregon\").\n\nIn most cases, you will only need to add your GitLab instance URL (GitLab.com is fine if that is where your repositories are), and the runner token, which you retrieve from the group level or project you wish to attach the runners to. If you are registering against a self-managed instance, you can use the instance-level tokens from the administrator console to register the runner for use across the entire instance. Read on for [instructions for finding Runner Registration Tokens](https://docs.gitlab.com/runner/register/#requirements).\n\nA few other customization parameters are available for your convenience.\n\nNote that the automation attempts to use the default VPC of the region in which you deploy and the default security group for the VPC. In some organizations, default VPCs and/or their security groups are locked. You can deploy to custom VPCs by using the full template instead of an easy button. 
On the easy button page, look for the footnote \"Not an easy button person?\" to find a link to the full template.\n\nWatch the video below to see the provisioning of 100 GitLab Spot Runners on AWS in less than 10 minutes and in less than 10 clicks for just $5 per hour.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/EW4RJv5zW4U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCheck out the YouTube playlist for more relevant videos about [GitLab and AWS](https://youtube.com/playlist?list=PL05JrBw4t0Ko30Bkf8bAvR-8E441Fy2G9)\n\n### This automation does much, much more\n\nWhile this article focused on how much you can save while using Spot for scaled runners, the underlying automation is capable of many other scenarios. Below is a summary of the additional features and benefits covered in the documentation.\n\n- Scaled runners that are persistent (not Spot) ([see more easy buttons here](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/easybuttons.md)).\n- Supports small, single runner setups and scaled ones.\n- Supports GitLab.com SaaS or self-managed instances.\n- Automates OS patching and Runner version upgrading.\n- Supports Windows and Linux.\n- Can be reused with Amazon provisioning services such as Service Catalog and Control Tower.\n- Implements least privilege security throughout.\n- Supports deregistering runners on scale-in or Spot termination.\n\nA full feature list is in the document [Features of GitLab HA Scaling Runner Vending Machine for AWS](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/FEATURES.md)\n\n### Easy running\n\nWe hope that this automation will make deployment of runners of all sizes simple for you. 
We are open to your feedback, suggestions and contributions in the GitLab project.\n",[976,977,9,1243],{"slug":9096,"featured":6,"template":686},"100-runners-in-less-than-10mins-and-less-than-10-clicks","content:en-us:blog:100-runners-in-less-than-10mins-and-less-than-10-clicks.yml","100 Runners In Less Than 10mins And Less Than 10 Clicks","en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks.yml","en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks",49,[663,691,712,735,756,778,800,821,841],1753309443355]