{"text": "#include \n\n#include \"Werk/Math/SummaryStatistics.hpp\"\n\nBOOST_AUTO_TEST_SUITE(SummaryStatistics)\n\nBOOST_AUTO_TEST_CASE(TestEmpty) {\n\tWerk::SummaryStatistics s;\n\tBOOST_REQUIRE_EQUAL(s.count(),0);\n}\n\nBOOST_AUTO_TEST_CASE(TestBasic) {\n\tWerk::SummaryStatistics s;\n\ts.sample(5.0);\n\ts.sample(1.0);\n\tBOOST_REQUIRE_EQUAL(s.count(), 2);\n BOOST_REQUIRE_EQUAL(s.sum(), 6.0);\n BOOST_REQUIRE_EQUAL(s.average(), 3.0);\n BOOST_REQUIRE_EQUAL(s.variance(), 4.0);\n BOOST_REQUIRE_EQUAL(s.stddev(), 2.0);\n s.reset();\n BOOST_REQUIRE_EQUAL(s.count(),0);\n const char* filename = \"summary.txt\";\n FILE* file = fopen(filename, \"rb\");\n s.writeJson(file);\n}\n\nBOOST_AUTO_TEST_SUITE_END()", "meta": {"hexsha": "2662b94ebebdea02af699358171ac965213e029a", "size": 725, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/WerkTest/Math/SummaryStatistics.cpp", "max_stars_repo_name": "mish24/werk", "max_stars_repo_head_hexsha": "2f8822842fb8f68a4402775d1d3b41021b5a9945", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/WerkTest/Math/SummaryStatistics.cpp", "max_issues_repo_name": "mish24/werk", "max_issues_repo_head_hexsha": "2f8822842fb8f68a4402775d1d3b41021b5a9945", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/WerkTest/Math/SummaryStatistics.cpp", "max_forks_repo_name": "mish24/werk", "max_forks_repo_head_hexsha": "2f8822842fb8f68a4402775d1d3b41021b5a9945", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8928571429, "max_line_length": 43, "alphanum_fraction": 0.7144827586, "num_tokens": 190, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7718434978390747, "lm_q2_score": 0.6477982315512489, "lm_q1q2_score": 0.4999988529344828}} {"text": "#include \n\n#include \n#include \n#include \n\nusing namespace QuantLib;\n\nvoid outputBasket(std::vector > basket,\n boost::shared_ptr termStructure) {\n\n std::cout << \"Calibration Basket:\" << std::endl;\n std::cout << \"expiry;maturityDate;expiryTime;maturityTime;nominal;rate;marketvol\" << std::endl;\n for(Size j=0;j helper = boost::dynamic_pointer_cast(basket[j]);\n Date endDate = helper->underlyingSwap()->fixedSchedule().dates().back();\n Real nominal = helper->underlyingSwap()->nominal();\n Real vol = helper->volatility()->value();\n Real rate = helper->underlyingSwap()->fixedRate();\n Date expiry = helper->swaption()->exercise()->date(0);\n Real expiryTime = termStructure->timeFromReference(expiry);\n Real endTime = termStructure->timeFromReference(endDate);\n // std::cout << expiry << \";\" << endDate << \";\" << expiryTime << \";\" << endTime << \";\" <<\n // nominal << \";\" << rate << \";\" << vol << std::endl;\n std::cout << expiry << \" & \" << endDate << \" & \" << expiryTime << \" & \" << endTime << \" & \" <<\n nominal << \" & \" << rate << \" \\\\\\\\\" << std::endl;\n }\n\n}\n\nvoid outputModel(std::vector& expiries, boost::shared_ptr model) {\n\n std::cout << \"Model parameters: \";\n std::cout << \"expiry;volatility\" << std::endl;\n for(Size i=0;iparams()[i] << std::endl;\n std::cout << expiries[i] << \" & \" << model->volatility()[i] << \" \\\\\\\\\" << std::endl;\n }\n std::cout << std::endl;\n\n}\n\nint main(int, char* []) {\n\n Date refDate(17,June,2013);\n Settings::instance().evaluationDate() = refDate;\n Date effective = TARGET().advance(refDate,2*Days);\n Date maturity = TARGET().advance(effective,10*Years);\n\n // market data: flat yts 3%, flat vol 20%\n\n Real rateLevel = 0.03;\n Real volLevel = 0.20;\n\n boost::shared_ptr ytsQuote0(new SimpleQuote(rateLevel));\n boost::shared_ptr ytsQuote1(new 
SimpleQuote(rateLevel+0.0010)); // 10bp shift up for dv01, dv02 calculation\n boost::shared_ptr ytsQuote2(new SimpleQuote(rateLevel-0.0010)); // 10bp shift down for dv01, dv02 calculation\n\n RelinkableHandle ytsQuote(ytsQuote0);\n\n Handle yts( boost::shared_ptr(new FlatForward(0,TARGET(),ytsQuote,\n Actual365Fixed())));\n\n boost::shared_ptr volQuote0(new SimpleQuote(volLevel));\n boost::shared_ptr volQuote1(new SimpleQuote(volLevel+0.01));\n\n RelinkableHandle volQuote(volQuote0);\n\n boost::shared_ptr swaptionVol(new ConstantSwaptionVolatility(0,TARGET(),\n ModifiedFollowing,volQuote,Actual365Fixed()));\n\n boost::shared_ptr iborIndex(new Euribor(6*Months,yts));\n boost::shared_ptr standardSwapBase(new EuriborSwapIsdaFixA(10*Years,yts));\n\n // spread Quote\n\n Real spreadLevel = 0.0100;\n boost::shared_ptr spreadQuote0(new SimpleQuote(spreadLevel));\n\n RelinkableHandle spreadQuote(spreadQuote0);\n\n // non standard swaption instrument (10y, amortizing nominal and step up coupon, yearly exercise dates)\n // we use the nonstandard swap and the nonstandard swaption for bond pricing, i.e. set the floating side to zero\n // receiver swap means we are long the bond\n // we are short the call right, so the rebate is positive\n\n std::vector fixedNominal(10), floatingNominal(20,0.0), fixedRate(10);\n for(Size i=0;i<10;i++) {\n fixedNominal[i] = 100.0;//-i*10.0;//-i*5000000;//(i>0 ? 
fixedNominal[i-1] : 100000000.0 )*1.075;\n floatingNominal[2*i] = floatingNominal[2*i+1] = 0.0; //fixedNominal[i];\n fixedRate[i] = 0.035;//+0.0030*i;\n }\n\n Schedule fixedSchedule(effective,maturity,1*Years,TARGET(),ModifiedFollowing,ModifiedFollowing,\n DateGeneration::Forward,false);\n Schedule floatingSchedule(effective,maturity,6*Months,TARGET(),ModifiedFollowing,ModifiedFollowing,\n DateGeneration::Forward,false);\n\n boost::shared_ptr underlying(new NonstandardSwap(VanillaSwap::Receiver,fixedNominal,floatingNominal,\n fixedSchedule,fixedRate,Thirty360(),floatingSchedule,\n iborIndex,1.0,0.0,Actual360(),true,true));\n\n std::vector exerciseDates;\n std::vector rebates;\n for(Size i=1;i<10;i++) {\n exerciseDates.push_back(TARGET().advance(fixedSchedule[i],-2*Days));\n rebates.push_back(-100.0);\n }\n\n //BermudanExercise exerciseTmp(exerciseDates, false);\n boost::shared_ptr exercise =\n boost::make_shared(\n BermudanExercise(exerciseDates, false), rebates, 2, TARGET(),\n Following);\n\n boost::shared_ptr swaption(\n new NonstandardSwaption(underlying, exercise));\n\n // pricing of vanilla bond part\n\n std::cout << \"============================================================\" << std::endl;\n std::cout << \"Vanilla part pricing\" << std::endl;\n std::cout << \"============================================================\" << std::endl;\n\n Leg vanilla = underlying->leg(0);\n Leg other = underlying->leg(1);\n\n Real vanillaNpv = CashFlows::npv(vanilla,*yts,spreadQuote->value(),yts->dayCounter(),Continuous,NoFrequency,false);\n Real otherNpv = CashFlows::npv(other,*yts,spreadQuote->value(),yts->dayCounter(),Continuous,NoFrequency,false);\n\n std::cout << \"npv = \" << vanillaNpv << \" (other = \" << otherNpv << \")\" << std::endl;\n\n // gsr model (1% mean reversion, intially 1% vol)\n\n exerciseDates.pop_back();\n std::vector stepDates(exerciseDates);\n std::vector vols(exerciseDates.size()+1,0.01);\n std::vector reversions(exerciseDates.size()+1,0.01);\n\n 
boost::shared_ptr gsr(new Gsr(yts,stepDates,vols,reversions,50.0));\n\n // engines for nonstandard swaption and standard swaption\n\n // this engine is used for standard swaptions used in model calibration\n boost::shared_ptr standardEngine(new Gaussian1dSwaptionEngine(gsr));\n // this engine is used for the non standard swaption\n boost::shared_ptr nonStandardEngine(new Gaussian1dNonstandardSwaptionEngine(gsr,64,7.0,true,false,\n spreadQuote));\n\n swaption->setPricingEngine(nonStandardEngine);\n\n std::cout.precision(6);\n std::cout << std::fixed;\n\n std::cout << \"============================================================\" << std::endl;\n std::cout << \"Model is not calibrated\" << std::endl;\n std::cout << \"============================================================\" << std::endl;\n outputModel(stepDates,gsr);\n\n std::cout << \"Calculate calibration basket\" << std::endl;\n std::vector > basket = swaption->calibrationBasket(standardSwapBase,swaptionVol);\n for(Size i=0;isetPricingEngine(standardEngine);\n outputBasket(basket,*gsr->termStructure());\n\n std::cout << \"Calibrate the model to the initial basket\" << std::endl;\n LevenbergMarquardt lm;\n EndCriteria ec(2000,200,1E-8,1E-8,1E-8);\n gsr->calibrateVolatilitiesIterative(basket,lm,ec);\n outputModel(stepDates,gsr);\n\n std::cout << \"Calculate calibration basket\" << std::endl;\n\n std::vector > basket2 = swaption->calibrationBasket(standardSwapBase,swaptionVol);\n for(Size i=0;isetPricingEngine(standardEngine);\n outputBasket(basket2,*gsr->termStructure());\n\n std::cout << \"Calibrate the model to the second basket\" << std::endl;\n\n gsr->calibrateVolatilitiesIterative(basket2,lm,ec);\n outputModel(stepDates,gsr);\n\n std::cout << \"Price the nonstandard swaption\" << std::endl;\n\n Real npv0 = swaption->NPV();\n\n std::cout << \"Shift the rate curve by 10bp up, down and reprice to compute delta, gamma\" << std::endl;\n\n ytsQuote.linkTo(ytsQuote1);\n Real npv1 = swaption->NPV();\n 
ytsQuote.linkTo(ytsQuote2);\n Real npv2 = swaption->NPV();\n ytsQuote.linkTo(ytsQuote0);\n\n Real npv3 = swaption->NPV();\n\n std::cout << \"NPV(-10bp) = \" << npv2 << \" NPV(0) = \" << npv0 << \" NPV(+10bp) = \" << npv1 << std::endl;\n std::cout << \"DV01 = \" << (npv1-npv2) / 20.0 << std::endl;\n std::cout << \"DV02 = \" << (npv1-2.0*npv0+npv2) / 100.0 << std::endl;\n std::cout << \"Vega = \" << (npv3-npv0) << std::endl;\n\n std::cout << \"============================================================\" << std::endl;\n std::cout << \"Compute delta, gamma with model recalibration\" << std::endl;\n std::cout << \"============================================================\" << std::endl;\n\n gsr->calibrateVolatilitiesIterative(basket2,lm,ec);\n outputModel(stepDates,gsr);\n npv0 = swaption->NPV();\n\n ytsQuote.linkTo(ytsQuote1);\n gsr->calibrateVolatilitiesIterative(basket2,lm,ec); outputModel(stepDates,gsr);\n npv1 = swaption->NPV();\n ytsQuote.linkTo(ytsQuote2);\n gsr->calibrateVolatilitiesIterative(basket2,lm,ec); outputModel(stepDates,gsr);\n npv2 = swaption->NPV();\n ytsQuote.linkTo(ytsQuote0);\n\n for(Size i=0;i(*basket2[i]->volatility())\n ->setValue(volQuote1->value());\n gsr->calibrateVolatilitiesIterative(basket2,lm,ec); outputModel(stepDates,gsr);\n npv3 = swaption->NPV();\n for(Size i=0;i(*basket2[i]->volatility())\n ->setValue(volQuote0->value());\n\n std::cout << \"NPV(-10bp) = \" << npv2 << \" NPV(0) = \" << npv0 << \" NPV(+10bp) = \" << npv1 << std::endl;\n std::cout << \"DV01 = \" << (npv1-npv2) / 20.0 << std::endl;\n std::cout << \"DV02 = \" << (npv1-2.0*npv0+npv2) / 100.0 << std::endl;\n std::cout << \"Vega = \" << (npv3-npv0) << std::endl;\n\n std::cout << \"============================================================\" << std::endl;\n std::cout << \"Compute delta, gamma with basket recalculation and recalibration\" << std::endl;\n std::cout << \"============================================================\" << std::endl;\n\n 
gsr->calibrateVolatilitiesIterative(basket2,lm,ec);\n outputModel(stepDates,gsr);\n npv0 = swaption->NPV();\n\n ytsQuote.linkTo(ytsQuote1);\n gsr->calibrateVolatilitiesIterative(basket2,lm,ec); outputModel(stepDates,gsr);\n std::vector > basket3a = swaption->calibrationBasket(standardSwapBase,swaptionVol);\n for(Size i=0;isetPricingEngine(standardEngine);\n outputBasket(basket3a,*gsr->termStructure());\n gsr->calibrateVolatilitiesIterative(basket3a,lm,ec); outputModel(stepDates,gsr);\n npv1 = swaption->NPV();\n\n ytsQuote.linkTo(ytsQuote2);\n\n gsr->calibrateVolatilitiesIterative(basket2,lm,ec); outputModel(stepDates,gsr);\n std::vector > basket3b = swaption->calibrationBasket(standardSwapBase,swaptionVol);\n for(Size i=0;isetPricingEngine(standardEngine);\n outputBasket(basket3b,*gsr->termStructure());\n gsr->calibrateVolatilitiesIterative(basket3b,lm,ec); outputModel(stepDates,gsr);\n npv2 = swaption->NPV();\n\n ytsQuote.linkTo(ytsQuote0);\n\n for(Size i=0;i(*basket2[i]->volatility())->\n setValue(volQuote1->value());\n volQuote.linkTo(volQuote1);\n gsr->calibrateVolatilitiesIterative(basket2,lm,ec); outputModel(stepDates,gsr);\n std::vector > basket3c = swaption->calibrationBasket(standardSwapBase,swaptionVol);\n for(Size i=0;isetPricingEngine(standardEngine);\n outputBasket(basket3c,*gsr->termStructure());\n gsr->calibrateVolatilitiesIterative(basket3c,lm,ec); outputModel(stepDates,gsr);\n npv3 = swaption->NPV();\n for(Size i=0;i(*basket2[i]->volatility())->\n setValue(volQuote0->value());\n volQuote.linkTo(volQuote0);\n\n\n std::cout << \"NPV(-10bp) = \" << npv2 << \" NPV(0) = \" << npv0 << \" NPV(+10bp) = \" << npv1 << std::endl;\n std::cout << \"DV01 = \" << (npv1-npv2) / 20.0 << std::endl;\n std::cout << \"DV02 = \" << (npv1-2.0*npv0+npv2) / 100.0 << std::endl;\n std::cout << \"Vega = \" << (npv3-npv0) << std::endl;\n\n\n\n}\n", "meta": {"hexsha": "f1f28dcd6666f032b5b215ff4ad6b9825a3dbb45", "size": 13199, "ext": "cpp", "lang": "C++", "max_stars_repo_path": 
"Examples/CallableBonds2/CallableBonds2.cpp", "max_stars_repo_name": "universe1987/QuantLib", "max_stars_repo_head_hexsha": "bbb0145aff285853755b9f6ed013f53a41163acb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2016-03-28T15:05:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-17T23:05:57.000Z", "max_issues_repo_path": "Examples/CallableBonds2/CallableBonds2.cpp", "max_issues_repo_name": "universe1987/QuantLib", "max_issues_repo_head_hexsha": "bbb0145aff285853755b9f6ed013f53a41163acb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2015-02-02T20:32:43.000Z", "max_issues_repo_issues_event_max_datetime": "2015-02-02T20:32:43.000Z", "max_forks_repo_path": "Examples/CallableBonds2/CallableBonds2.cpp", "max_forks_repo_name": "pcaspers/quantlib", "max_forks_repo_head_hexsha": "bbb0145aff285853755b9f6ed013f53a41163acb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2015-01-26T14:50:24.000Z", "max_forks_repo_forks_event_max_datetime": "2015-10-23T07:41:30.000Z", "avg_line_length": 47.4784172662, "max_line_length": 124, "alphanum_fraction": 0.615425411, "num_tokens": 3653, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7718434978390747, "lm_q2_score": 0.6477982315512488, "lm_q1q2_score": 0.4999988529344827}} {"text": "#include \n#include \n\nusing namespace boost::multiprecision;\nusing namespace boost::random;\n\n\nuint256_t rand_gen(){\n\ttypedef independent_bits_engine generator_type;\n generator_type gen;\n //\n // Generate some values:\n //\n //std::cout << std::hex << std::showbase;\n //for(unsigned i = 0; i < 10; ++i)\n // std::cout << gen() << std::endl;\n return gen();\n}\n", "meta": {"hexsha": "53e6c9d3510ea92b52f9458250330de7fa3fec88", "size": 448, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/rand_gen.hpp", "max_stars_repo_name": "SwaroopReddyBasireddy/Proof-of-Assets", "max_stars_repo_head_hexsha": "011ee85ad4e7941c2bfbf53c1872fbaa40038cf5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/rand_gen.hpp", "max_issues_repo_name": "SwaroopReddyBasireddy/Proof-of-Assets", "max_issues_repo_head_hexsha": "011ee85ad4e7941c2bfbf53c1872fbaa40038cf5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/rand_gen.hpp", "max_forks_repo_name": "SwaroopReddyBasireddy/Proof-of-Assets", "max_forks_repo_head_hexsha": "011ee85ad4e7941c2bfbf53c1872fbaa40038cf5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5789473684, "max_line_length": 73, "alphanum_fraction": 0.6629464286, "num_tokens": 120, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES\n\n", "lm_q1_score": 0.7718434978390747, "lm_q2_score": 0.6477982179521105, "lm_q1q2_score": 0.4999988424380762}} {"text": "\n# include \n# include \n# include \n# include \n# include \n# include \n# include \n# include \n# include \n# include \n#include \n\n#include \n\n\nusing namespace std;\n\n\nvector split(string& input, char delimiter)\n{\n istringstream stream(input);\n string field;\n vector result;\n while (getline(stream, field, delimiter)) {\n result.push_back(field);\n }\n return result;\n}\n\nint get_dataset(const char* file_path, int*& ou_array, int*& ov_array, float*& y0_array)\n{\t\n\n\tvector ou;\n\tvector ov;\n\tvector y0;\n\tifstream ifs(file_path);\n\tstring line;\n\twhile(getline(ifs,line)){\n\n\t\tstd::vector strvec = split(line, ' ');\n\n\t\tou.push_back( stoi(strvec.at(0)) );\n\t\tov.push_back( stoi(strvec.at(1)) );\n\t\ty0.push_back( stof(strvec.at(2)) );\n\n\n\t}\n\tint k = ou.size();\n\tou_array = new int[k];\n\tov_array = new int[k];\n\ty0_array = new float[k];\n\tfor (int i = 0; i < k; ++i)\n\t{\n\t\tou_array[i] = ou[i];\n\t\tov_array[i] = ov[i];\n\t\ty0_array[i] = (float)y0[i];\n\t}\n\n\treturn k;\n}\n\nvoid show_vec_array(const std::vector >& array)\n{\n\tfor (int i = 0; i < array.size(); ++i)\n\t{\n\t\t\n\t\t// std::vector array_i = array[i];\n\t\tfor (int j = 0; j < array[i].size(); ++j)\n\t\t{\n\t\t\tprintf(\"%i \", array[i][j]);\n\t\t}\n\t\tprintf(\"\\n\");\n\t}\n}\n\nvoid show_array(int N, int M, float** array)\n{\n\tfor (int i = 0; i < N; ++i)\n\t{\n\t\tfor (int j = 0; j < M; ++j)\n\t\t{\n\t\t\tprintf(\"%f \", array[i][j]);\n\t\t}\n\t\tprintf(\"\\n\");\n\t}\n}\n\nint maximum_value(int* array, int size)\n{\n\tint max = array[0];\n\tfor (int i = 0; i < size; ++i)\n\t{\n\t\tif (max < array[i])\n\t\t{\n\t\t\tmax = array[i];\n\t\t}\n\t}\n\treturn max;\n}\n\nfloat mean_matrix(float** matrix, int N, int M)\n{\n\tfloat sm = 0;\n\tfor (int i = 0; i < N; ++i)\n\t{\n\t\tfor (int j = 0; j < M; ++j)\n\t\t{\n\t\t\tsm += 
matrix[i][j];\n\t\t}\n\t}\n\tsm = sm/(float)(N*M);\n\treturn sm;\n}\n\nfloat mean_array(float* array, int size)\n{\n\tfloat sm = 0;\n\tfor (int i = 0; i < size; ++i)\n\t{\n\t\tsm += array[i];\n\t}\n\treturn sm/(float)size;\n}\n\nfloat std_array(float* array, int size, float mean)\n{\n\tfloat sm = 0;\n\tfor (int i = 0; i < size; ++i)\n\t{\n\t\tsm += pow(array[i]-mean, 2);\n\t}\n\tsm = sm / (float)size;\n\tsm = sqrt(sm);\n\treturn sm;\n}\n\n\ntuple< vector< vector >, vector< vector >, vector< vector >, vector< vector > > getNb(int* ou, int* ov,int N,int M,int k)\n{\n\tvector< vector > nbu;\n\tvector< vector > nbv;\n\tvector< vector > nbul;\n\tvector< vector > nbvl;\n\n\tnbu = vector>(N, vector(0, 0));\n\tnbul = vector>(N, vector(0, 0));\n\tnbv = vector>(M, vector(0, 0));\n\tnbvl = vector>(M, vector(0, 0));\n\n\tfor (int i = 0; i < k; ++i)\n\t{\n\t\tint u_ind = ou[i];\n\t\tint v_ind = ov[i];\n\n\t\tnbu[u_ind].push_back(v_ind);\n\t\tnbul[u_ind].push_back(i);\n\n\t\tnbv[v_ind].push_back(u_ind);\n\t\tnbvl[v_ind].push_back(i);\n\t}\n\n\treturn std::forward_as_tuple(nbu, nbv, nbul, nbvl);\n\n}\n\ntuple< float**, float**, int, float* > cbmf(int N, int M, int* ou, int* ov, const vector>& nbu, const vector>& nbv, const vector>& nbul, const vector>& nbvl, int k, float* y0, int maxCnt, float gam, double conv, int R, float lam, int* te_u, int* te_v, float* te_y0, int k_te, float mean, float std)\n{\n\n\tstd::random_device rd;\n\tstd::mt19937 mt(rd());\n\tstd::uniform_real_distribution rdm(0.0, 1.0);\n\n\tfloat *rmse_arr = new float[maxCnt];\n\n\t// allocating memory\n\tfloat **a_hat = new float*[k];\n\tfloat **b_hat = new float*[k];\n\tfloat **c_hat = new float*[k];\n\tfloat **d_hat = new float*[k];\n\tfloat **alf = new float*[k];\n\tfloat **beta = new float*[k];\n\tfloat **gamma = new float*[k];\n\tfloat **delta = new float*[k];\n\tfor (int i = 0; i < k; ++i)\n\t{\n\t\ta_hat[i] = new float[R];\n\t\tb_hat[i] = new float[R];\n\t\tc_hat[i] = new float[R];\n\t\td_hat[i] = new 
float[R];\n\t\talf[i] = new float[R];\n\t\tbeta[i] = new float[R];\n\t\tgamma[i] = new float[R];\n\t\tdelta[i] = new float[R];\n\t}\n\n\n\tfloat **a_m_hat = new float*[N];\n\tfloat **b_m_hat = new float*[N];\n\tfloat **c_m_hat = new float*[M];\n\tfloat **d_m_hat = new float*[M];\n\tfor (int i = 0; i < N; ++i)\n\t{\n\t\ta_m_hat[i] = new float[R];\n\t\tb_m_hat[i] = new float[R];\n\t}\n\tfor (int i = 0; i < M; ++i)\n\t{\n\t\tc_m_hat[i] = new float[R];\n\t\td_m_hat[i] = new float[R];\n\t}\n\n\tfloat *alf_m = new float[k];\n float *beta_m = new float[k];\n float *gamma_m = new float[k];\n float *delta_m = new float[k];\n\n float **u = new float*[N];\n for (int i = 0; i < N; ++i)\n {\n \tu[i] = new float[R];\n }\n float **v = new float*[M];\n for (int i = 0; i < M; ++i)\n {\n \tv[i] = new float[R];\n }\n\n\n auto t0 = std::chrono::system_clock::now();\n\n\n // initialize u and v\n\tarma::sp_mat spmx(N,M);\n for (int i = 0; i < k; ++i)\n {\n \tspmx(ou[i], ov[i]) = y0[i];\n }\n arma::mat U;\n\tarma::vec s;\n\tarma::mat V;\n\n\tsvds(U, s, V, spmx, R);\n\n\ts = arma::sqrt(s);\n\tarma::mat S = diagmat(s);\n\n\tU = U*S;\n\tV = V*S;\n\n\n for (int i = 0; i < N; ++i)\n {\n \tfor (int r = 0; r < R; ++r)\n \t{\n \t\tu[i][r] = U(i,r);\n \t}\n }\n for (int i = 0; i < M; ++i)\n {\n \tfor (int r = 0; r < R; ++r)\n \t{\n \t\tv[i][r] = V(i,r);\n \t}\n }\n\n\n\tfor (int i = 0; i < k; ++i)\n\t{\n\t\tfor (int j = 0; j < R; ++j)\n\t\t{\n\t\t\ta_hat[i][j] = rdm(mt)*5;\n\t\t\tb_hat[i][j] = -a_hat[i][j]*u[ou[i]][j];\n\t\t\t\n\t\t\tc_hat[i][j] = rdm(mt)*5;\n\t\t\td_hat[i][j] = -c_hat[i][j]*v[ov[i]][j];\n\t\t}\n\t}\n\n\n\tfor (int i = 0; i < N; ++i)\n\t{\n\t\tfor (int r = 0; r < R; ++r)\n\t\t{\n\t\t\t\n\t\t\tfloat a_sm = 0;\n\t\t\tfloat b_sm = 0;\n\t\t\tfor (const auto& nb: nbul[i])\n\t\t\t{\n\t\t\t\ta_sm += a_hat[nb][r];\n\t\t\t\tb_sm += b_hat[nb][r];\n\t\t\t}\n\t\t\ta_m_hat[i][r] = a_sm + lam;\n\t\t\tb_m_hat[i][r] = b_sm;\n\t\t}\n\t}\n\tfor (int i = 0; i < M; ++i)\n\t{\n\t\tfor (int r = 0; r < 
R; ++r)\n\t\t{\n\t\t\t\n\t\t\tfloat c_sm = 0;\n\t\t\tfloat d_sm = 0;\n\t\t\tfor (const auto& nb: nbvl[i])\n\t\t\t{\n\t\t\t\tc_sm += c_hat[nb][r];\n\t\t\t\td_sm += d_hat[nb][r];\n\t\t\t}\n\t\t\tc_m_hat[i][r] = c_sm + lam;\n\t\t\td_m_hat[i][r] = d_sm;\n\t\t}\n\t}\n\n\n\tfor (int i = 0; i < k; ++i)\n\t{\n\t\tfor (int r = 0; r < R; ++r)\n\t\t{\n\t\t\tfloat vov = v[ov[i]][r];\n\t\t\tfloat vov2 = pow(vov, 2);\n\t\t\tfloat uou = u[ou[i]][r];\n \t\tfloat uou2 = pow(uou, 2);\n\t\t\talf[i][r] = vov2 / ( a_m_hat[ou[i]][r] - a_hat[i][r] );\n\t\t\tbeta[i][r] = ( b_m_hat[ou[i]][r] - b_hat[i][r] ) * vov / ( a_m_hat[ou[i]][r] - a_hat[i][r] );\n\t\t\tgamma[i][r] = uou2 /( c_m_hat[ov[i]][r] - c_hat[i][r] );\n\t\t\tdelta[i][r] = ( d_m_hat[ov[i]][r] - d_hat[i][r] ) * uou / ( c_m_hat[ov[i]][r] - c_hat[i][r] );\n\t\t}\n\t}\n\n\tfloat sm1;\n\tfloat sm2;\n\tfloat sm3;\n\tfloat sm4;\n\tfor (int i = 0; i < k; ++i)\n\t{\t\n\t\tsm1 = 0;\n\t\tsm2 = 0;\n\t\tsm3 = 0;\n\t\tsm4 = 0;\n\t\tfor (int r = 0; r < R; ++r)\n\t\t{\n\t\t\tsm1 += alf[i][r];\n\t\t\tsm2 += beta[i][r];\n\t\t\tsm3 += gamma[i][r];\n\t\t\tsm4 += delta[i][r];\n\t\t}\n\t\talf_m[i] = sm1 + 1;\n\t\tbeta_m[i] = sm2;\n\t\tgamma_m[i] = sm3 + 1;\n\t\tdelta_m[i] = sm4;\n\t}\n\t\n\n\n\t ///////////////\n /* main loop */\n ///////////////\n int cnt;\n float rmse = 0.0;\n for (cnt = 0; cnt < maxCnt; ++cnt)\n {\n\n \t/* u update */\n\n\n \t// update alf and beta\n \tfloat dm;\n \tfloat vov;\n \tfloat vov2;\n \tfor (int i = 0; i < k; ++i)\n\t\t{\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\tvov = v[ov[i]][r];\n\t\t\t\tvov2 = pow(vov, 2);\n\t\t\t\tdm = a_m_hat[ou[i]][r] - a_hat[i][r];\n\n\t\t\t\talf[i][r] = vov2 / dm;\n\t\t\t\tbeta[i][r] = ( b_m_hat[ou[i]][r] - b_hat[i][r] ) * vov / dm;\n\t\t\t}\n\t\t}\n\n\n\t\t// update alf_m and beta_m\n\t\tfloat sm1;\n\t\tfloat sm2;\n\t\tfor (int i = 0; i < k; ++i)\n\t\t{\t\n\t\t\tsm1 = 0;\n\t\t\tsm2 = 0;\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\tsm1 += alf[i][r];\n\t\t\t\tsm2 += 
beta[i][r];\n\t\t\t}\n\t\t\talf_m[i] = sm1 + 1;\n\t\t\tbeta_m[i] = sm2 + y0[i];\n\t\t}\n\n\n \t// update a and b hat\n \tfor (int i = 0; i < k; ++i)\n \t{\t\n \t\tfor (int r = 0; r < R; ++r)\n \t\t{\n \t\t\tvov = v[ov[i]][r];\n \t\t\tvov2 = pow(vov, 2);\n \t\t\tdm = alf_m[i] - alf[i][r];\n\n \t\t\ta_hat[i][r] = (1-gam)*a_hat[i][r] + gam*vov2/dm;\n \t\t\tb_hat[i][r] = (1-gam)*b_hat[i][r] - gam*( beta_m[i] - beta[i][r] )*vov/dm;\n \t\t}\n \t}\n\n\n \t// update a and b marginalized hat\n \tfloat a_sm;\n \tfloat b_sm;\n \tfor (int i = 0; i < N; ++i)\n\t\t{\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\ta_sm = 0;\n\t\t\t\tb_sm = 0;\n\t\t\t\tfor (const auto& nb: nbul[i])\n\t\t\t\t{\n\t\t\t\t\ta_sm += a_hat[nb][r];\n\t\t\t\t\tb_sm += b_hat[nb][r];\n\t\t\t\t}\n\t\t\t\ta_m_hat[i][r] = a_sm + lam;\n\t\t\t\tb_m_hat[i][r] = b_sm;\n\t\t\t}\n\t\t}\n\n\n\t\t// update u\n\t\tfor (int i = 0; i < N; ++i)\n\t\t{\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\tu[i][r] = -b_m_hat[i][r]/a_m_hat[i][r];\n\t\t\t}\n\t\t}\n\n\n\n\t\t/* update v */\n\n\t\t// update gamma and delta\n\t\tfloat uou;\n\t\tfloat uou2;\n\t\tfor (int i = 0; i < k; ++i)\n\t\t{\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\tuou = u[ou[i]][r];\n\t \t\tuou2 = pow(uou, 2);\n\t \t\tdm = c_m_hat[ov[i]][r] - c_hat[i][r];\n\n\t\t\t\tgamma[i][r] = uou2 /dm;\n\t\t\t\tdelta[i][r] = ( d_m_hat[ov[i]][r] - d_hat[i][r] ) * uou / dm;\n\t\t\t}\n\t\t}\n\n\n\t\t// update gamma_m and delta_m\n\t\tfor (int i = 0; i < k; ++i)\n\t\t{\t\n\t\t\tsm1 = 0.0;\n\t\t\tsm2 = 0.0;\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\tsm1 += gamma[i][r];\n\t\t\t\tsm2 += delta[i][r];\n\t\t\t}\n\t\t\tgamma_m[i] = sm1 + 1;\n\t\t\tdelta_m[i] = sm2 + y0[i];\n\t\t}\n\n\n\t\t// update c and d hat\n \tfor (int i = 0; i < k; ++i)\n \t{\n \t\tfor (int r = 0; r < R; ++r)\n \t\t{\n \t\t\tuou = u[ou[i]][r];\n \t\t\tuou2 = pow(uou, 2);\n \t\t\tdm = gamma_m[i] - gamma[i][r];\n\n \t\t\tc_hat[i][r] = (1-gam)*c_hat[i][r] + gam*uou2/dm;\n 
\t\t\td_hat[i][r] = (1-gam)*d_hat[i][r] - gam*( delta_m[i] - delta[i][r] )*uou/dm;\n \t\t}\n \t}\n\n \t// update c and d marginalized hat\n \tfloat c_sm;\n \tfloat d_sm;\n \tfor (int i = 0; i < M; ++i)\n\t\t{\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\t\n\t\t\t\tc_sm = 0;\n\t\t\t\td_sm = 0;\n\t\t\t\tfor (const auto& nb: nbvl[i])\n\t\t\t\t{\n\t\t\t\t\tc_sm += c_hat[nb][r];\n\t\t\t\t\td_sm += d_hat[nb][r];\n\t\t\t\t}\n\t\t\t\tc_m_hat[i][r] = c_sm + lam;\n\t\t\t\td_m_hat[i][r] = d_sm;\n\t\t\t}\n\t\t}\n\n\t\t\n\n\t\t// update v\n\t\tfor (int i = 0; i < M; ++i)\n\t\t{\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\tv[i][r] = -d_m_hat[i][r]/c_m_hat[i][r];\n\t\t\t}\n\t\t}\n\n\n\n\t\t// calulate test rmse\n\t\tfloat mse = 0;\n\t\tfor (int i = 0; i < k_te; ++i)\n\t\t{\n\n\t\t\tfloat inf_y0 = 0;\n\t\t\tint u_i = te_u[i];\n\t\t\tint v_i = te_v[i];\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\tinf_y0 += u[u_i][r]*v[v_i][r];\n\t\t\t}\n\t\t\tinf_y0 = inf_y0*std + mean;\n\t\t\tmse += pow(inf_y0 - te_y0[i], 2);\n\n\t\t}\n\t\tmse = mse / (float)k_te;\n\t\tfloat new_rmse = sqrt(mse);\n\t\tfloat dif = abs(new_rmse-rmse);\n\n\t\trmse_arr[cnt] = new_rmse;\n\n\t\tif (cnt%1 == 0)\n\t\t{\n\t\t\tprintf(\"iteration:%d \", cnt);\n\t\t\tprintf(\"rmse:%f\\n\", new_rmse);\n\t\t}\n\n\t\trmse = new_rmse;\n\t\tif (dif < conv || dif != dif)\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t\t\n\n\n } // end main loop\n\n return std::forward_as_tuple(u, v, cnt, rmse_arr);\n\n}\n\n\n\ntuple< float**, float**, int, float* > approx_cbmf(int N, int M, int* ou, int* ov, const vector>& nbu, const vector>& nbv, const vector>& nbul, const vector>& nbvl, int k, float* y0, int maxCnt, float gam, double conv, int R, float lam, int* te_u, int* te_v, float* te_y0, int k_te, float mean, float std)\n{\n\n\tstd::random_device rd;\n\tstd::mt19937 mt(rd());\n\tstd::uniform_real_distribution rdm(0.0, 1.0);\n\tstd::normal_distribution g_rdm(0.0,1.0);\n\n\tfloat *rmse_arr = new float[maxCnt];\n\n\t// allocating 
memory\n\tfloat *a = new float[k];\n\tfloat *b = new float[k];\n\tfloat *c = new float[k];\n\tfloat *d = new float[k];\n\n\n\tfloat **a_hat = new float*[N];\n\tfloat **b_hat = new float*[N];\n\tfloat **u = new float*[N];\n\tfloat **c_hat = new float*[M];\n\tfloat **d_hat = new float*[M];\n\tfloat **v = new float*[M];\n\tfor (int i = 0; i < N; ++i)\n\t{\n\t\ta_hat[i] = new float[R];\n\t\tb_hat[i] = new float[R];\n\t\tu[i] = new float[R];\n\t}\n\tfor (int i = 0; i < M; ++i)\n\t{\n\t\tc_hat[i] = new float[R];\n\t\td_hat[i] = new float[R];\n\t\tv[i] = new float[R];\n\t}\n\n\n arma::sp_mat spmx(N,M);\n for (int i = 0; i < k; ++i)\n {\n \tspmx(ou[i], ov[i]) = y0[i];\n }\n arma::mat U;\n\tarma::vec s;\n\tarma::mat V;\n\n\tsvds(U, s, V, spmx, R);\n\n\ts = arma::sqrt(s);\n\tarma::mat S = diagmat(s);\n\n\tU = U*S;\n\tV = V*S;\n\n\n for (int i = 0; i < N; ++i)\n {\n \tfor (int r = 0; r < R; ++r)\n \t{\n \t\tu[i][r] = U(i,r);\n \t}\n }\n for (int i = 0; i < M; ++i)\n {\n \tfor (int r = 0; r < R; ++r)\n \t{\n \t\tv[i][r] = V(i,r);\n \t}\n }\n\n\n // initialize a,b,c,d hat\n\tfor (int i = 0; i < N; ++i)\n\t{\n\t\tfor (int j = 0; j < R; ++j)\n\t\t{\n\t\t\ta_hat[i][j] = rdm(mt)+100;\n\t\t\tb_hat[i][j] = (a_hat[i][j])*u[i][j];\n\t\t}\n\t}\n\tfor (int i = 0; i < M; ++i)\n\t{\n\t\tfor (int j = 0; j < R; ++j)\n\t\t{\n\t\t\tc_hat[i][j] = rdm(mt)+100;\n\t\t\td_hat[i][j] = (c_hat[i][j])*v[i][j];\n\t\t}\n\t}\n\n\t// initialize alf, beta, gamma, delta\n\tfor (int i = 0; i < k; ++i)\n\t{\n\t\ta[i] = rdm(mt);\n\t\tb[i] = rdm(mt);\n\t\tc[i] = rdm(mt);\n\t\td[i] = rdm(mt);\n\t}\n\n\n\t ///////////////\n /* main loop */\n ///////////////\n printf(\"main loop\\n\");\n float vov;\n\tfloat vov2;\n\tfloat uou;\n\tfloat uou2;\n float sm1;\n float sm2;\n int cnt;\n float rmse = 0.0;\n for (cnt = 0; cnt < maxCnt; ++cnt)\n {\n\n \t/* u update */\n\n \t// update alf and beta\n \tfor (int i = 0; i < k; ++i)\n\t\t{\t\n\t\t\tsm1 = 0;\n\t\t\tsm2 = 0;\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\tvov 
= v[ov[i]][r];\n\t\t\t\tuou = u[ou[i]][r];\n\t\t\t\tvov2 = pow(vov, 2);\n\n\n\t\t\t\t// a\n\t\t\t\tsm1 += vov2/(a_hat[ou[i]][r]+lam);\n\t\t\t\t// b\n\t\t\t\tsm2 += vov*uou;\n\t\t\t}\n\t\t\tb[i] = ( y0[i]-sm2+a[i]*b[i] ) / ( 1+a[i] );\n\t\t\ta[i] = sm1;\n\t\t\t\n\t\t}\n\n\n\n \t// update a and b hat\n \tfor (int i = 0; i < N; ++i)\n\t\t{\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\tsm1 = 0;\n\t\t\t\tsm2 = 0;\n\t\t\t\tfor (const auto& nb: nbul[i])\n\t\t\t\t{\n\t\t\t\t\tvov = v[ov[nb]][r];\n\t\t\t\t\tvov2 = pow(vov, 2);\n\t\t\t\t\tuou = u[i][r];\n\n\t\t\t\t\t// a_hat\n\t\t\t\t\tsm1 += vov2/(1+a[nb]);\n\n\t\t\t\t\t// b_hat\n\t\t\t\t\tsm2 += b[nb]*vov;\n\t\t\t\t\tsm2 += uou*vov2/(1+a[nb]);\n\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tb_hat[i][r] = (1-gam)*b_hat[i][r] + gam*sm2;\n\t\t\t\ta_hat[i][r] = (1-gam)*a_hat[i][r] + gam*sm1;\n\n\t\t\t\t// update u\n\t\t\t\tu[i][r] = b_hat[i][r]/(a_hat[i][r]+lam);\n\n\t\t\t}\n\t\t}\n\n\n\n\t\t/* update v */\n\n\t\t// update c and d\n \tfor (int i = 0; i < k; ++i)\n\t\t{\t\n\t\t\tsm1 = 0;\n\t\t\tsm2 = 0;\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\tvov = v[ov[i]][r];\n\t\t\t\tuou = u[ou[i]][r];\n\t\t\t\tuou2 = pow(uou, 2);\n\n\t\t\t\t// a\n\t\t\t\tsm1 += uou2/(c_hat[ov[i]][r]+lam);\n\t\t\t\t// b\n\t\t\t\tsm2 += vov*uou;\n\t\t\t}\n\t\t\td[i] = ( y0[i]-sm2+c[i]*d[i] ) / ( 1+c[i] );\n\t\t\tc[i] = sm1;\n\t\t\t\n\t\t}\n\n\n\t\t// update c and d hat\n \tfor (int i = 0; i < M; ++i)\n\t\t{\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\tsm1 = 0;\n\t\t\t\tsm2 = 0;\n\t\t\t\tfor (const auto& nb: nbvl[i])\n\t\t\t\t{\n\t\t\t\t\t// vov = v[ov[nb]][r];\n\t\t\t\t\tvov = v[i][r];\n\t\t\t\t\tuou = u[ou[nb]][r];\n\t\t\t\t\tuou2 = pow(uou, 2);\n\n\t\t\t\t\t// a_hat\n\t\t\t\t\tsm1 += uou2/(1+c[nb]);\n\n\t\t\t\t\t// b_hat\n\t\t\t\t\tsm2 += d[nb]*uou;\n\t\t\t\t\tsm2 += vov*uou2/(1+c[nb]);\n\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\td_hat[i][r] = (1-gam)*d_hat[i][r] + gam*sm2;\n\t\t\t\tc_hat[i][r] = (1-gam)*c_hat[i][r] + gam*sm1;\n\n\t\t\t\t// update 
v\n\t\t\t\tv[i][r] = d_hat[i][r]/(c_hat[i][r]+lam);\n\t\t\t}\n\t\t}\n\t\t\n\n\n\n\t\t// calulate test rmse\n\t\tfloat mse = 0;\n\t\tfor (int i = 0; i < k_te; ++i)\n\t\t{\n\n\t\t\tfloat inf_y0 = 0;\n\t\t\tint u_i = te_u[i];\n\t\t\tint v_i = te_v[i];\n\t\t\tfor (int r = 0; r < R; ++r)\n\t\t\t{\n\t\t\t\tinf_y0 += u[u_i][r]*v[v_i][r];\n\t\t\t}\n\t\t\tinf_y0 = inf_y0*std + mean;\n\n\t\t\tmse += pow(inf_y0 - te_y0[i], 2);\n\n\t\t}\n\t\tmse = mse / (float)k_te;\n\t\tfloat new_rmse = sqrt(mse);\n\t\tfloat dif = abs(new_rmse-rmse);\n\n\t\trmse_arr[cnt] = new_rmse;\n\n\t\tif (cnt%1 == 0)\n\t\t{\n\t\t\tprintf(\"iteration:%d \", cnt);\n\t\t\tprintf(\"rmse:%f\\n\", new_rmse);\n\t\t}\n\n\t\trmse = new_rmse;\n\t\tif (dif < conv || dif != dif)\n\t\t{\n\t\t\tbreak;\n\t\t}\n\n\n } // end main loop\n\n return std::forward_as_tuple(u, v, cnt, rmse_arr);\n\n}\n\nvoid usage(string& doc)\n{\n\tdoc = \"Usage:\\n\";\n\tdoc += \" ./cbmf [-l learningrate] [-L lambda] [-R rank] [-m maxiteration] [-r trainfilename] [-t testfilename] [-o outpath] [-c convint] [-p] [-h] [-v]\\n\";\n\tdoc += \"\\n\";\n\tdoc += \"Options:\\n\";\n\tdoc += \" -l Learning rate. [default: 0.3]\\n\";\n\tdoc += \" -L Regularization parameter. [default: 3]\\n\";\n\tdoc += \" -R Rank. [default: 10]\\n\";\n\tdoc += \" -m Maximum number of iterations. [default: 100]\\n\";\n\tdoc += \" -r Filename of dataset for training. [default: dataset/ml_1m_train.txt]\\n\";\n\tdoc += \" -t Filename of dataset for test. [default: dataset/ml_1m_test.txt]\\n\";\n\tdoc += \" -o Where output files are to be placed. [default: output/]\\n\";\n\tdoc += \" -c Exponent of convergence condition. If RMSE < pow(10, convint) is satisfied, it is regarded as convergence. [default: -5]\\n\";\n\tdoc += \" -p If this option is set, ACBMF is to be performed. 
Without this option, CBMF is to be done.\\n\";\n\tdoc += \" -h Show help.\\n\";\n\tdoc += \" -v Show version.\\n\";\n\tdoc += \"\\n\";\n\tdoc += \"Examples:\\n\";\n\tdoc += \"Performing CBMF using 'dataset/ml_1m_train.txt' as training dataset and 'dataset/ml_1m_test.txt' as test dataset.\\n\";\n\tdoc += \" ./cbmf -r dataset/ml_1m_train.txt -t dataset/ml_1m_test.txt\\n\";\n\tdoc += \"Performing ACBMF using 'dataset/ml_1m_train.txt' as training dataset and 'dataset/ml_1m_test.txt' as test dataset.\\n\";\n\tdoc += \" ./cbmf -p -r dataset/ml_1m_train.txt -t dataset/ml_1m_test.txt\\n\";\n\tdoc += \"Showing help.\\n\";\n\tdoc += \" ./cbmf -h\\n\";\n\tdoc += \"Showing version.\\n\";\n\tdoc += \" ./cbmf -v\";\n}\n\nvoid version(string& doc)\n{\n\tdoc = \"cbmf v1.0\";\n}\n\n\nint main(int argc, char **argv)\n{\n\tfloat gam = 0.3;\n\tfloat lam = 3;\n\tint R = 10;\n\tint maxCnt = 100;\n\tconst char* train_filename = \"dataset/ml_1m_train.txt\";\n\tconst char* test_filename = \"dataset/ml_1m_test.txt\";\n\tconst char* outpath = \"output/\";\n\tint is_approx = 0;\n\tint conv_int = -5;\n\tint opt;\n\tstring doc = \"\";\n\tusage(doc);\n\tstring vsn = \"\";\n\tversion(vsn);\n\twhile ((opt = getopt(argc, argv, \"l:L:R:m:r:t:o:c:pshv\")) != -1) {\n switch (opt) {\n case 'l': gam=atof(optarg); break;\n case 'L': lam=atof(optarg); break;\n case 'R': R=atoi(optarg); break;\n case 'm': maxCnt=atoi(optarg); break;\n case 'r': train_filename=optarg; break;\n case 't': test_filename=optarg; break;\n case 'o': outpath=optarg; break;\n case 'c': conv_int=atoi(optarg); break;\n case 'p': is_approx=1; break;\n case 'h':\n \tprintf(\"%s\\n\", doc.c_str());\n \treturn 0;\n case 'v':\n \tprintf(\"%s\\n\", vsn.c_str());\n \treturn 0;\n default: \n \tprintf(\"%s\\n\", doc.c_str());\n \treturn 1;\n }\n }\n double conv = pow(10, conv_int);\n\tint folds = 10;\n\n\t\n\tprintf(\"learning rate:%f\\n\", gam);\n\tprintf(\"lambda:%f\\n\", lam);\n\tprintf(\"rank:%d\\n\", R);\n\tprintf(\"maxCnt:%d\\n\", 
maxCnt);\n\tprintf(\"is_approx:%d\\n\", is_approx);\n\tprintf(\"conv:%f\\n\", conv);\n\n\n\tint* ou_te = NULL;\n\tint* ov_te = NULL;\n\tfloat* y0_te = NULL;\n\tint* ou_tr = NULL;\n\tint* ov_tr = NULL;\n\tfloat* y0_tr = NULL;\n\tint k_te = get_dataset(test_filename,ou_te, ov_te, y0_te);\n\tint k_tr = get_dataset(train_filename,ou_tr, ov_tr, y0_tr);\n\n\tprintf(\"dataset loaded\\n\");\n\n\tint N = maximum_value(ou_tr, k_tr)+1;\n\tint M = maximum_value(ov_tr, k_tr)+1;\n\tint K = k_tr;\n\tprintf(\"N:%d\\n\", N);\n\tprintf(\"M:%d\\n\", M);\n\tprintf(\"K:%d\\n\", K);\n\n\n\tfloat mean = mean_array(y0_tr, k_tr);\n\tfloat std = std_array(y0_tr, k_tr, mean);\n\tprintf(\"mean:%f\\n\", mean);\n\tprintf(\"std:%f\\n\", std);\n\tfor (int i = 0; i < k_tr; ++i)\n\t{\n\t\ty0_tr[i] = (y0_tr[i]-mean)/std;\n\t}\n\n\n\tvector< vector > nbu;\n\tvector< vector > nbv;\n\tvector< vector > nbul;\n\tvector< vector > nbvl;\n\tstd::tie(nbu, nbv, nbul, nbvl) = getNb(ou_tr, ov_tr, N, M, K);\n\n\tprintf(\"got nb\\n\");\n\n\n\tfloat** u;\n\tfloat** v;\n\tint cnt;\n\tfloat* rmse_arr;\n\n\tauto start = std::chrono::system_clock::now();\n\tif (is_approx == 0)\n\t{\n\t\tstd::tie(u, v, cnt, rmse_arr) = cbmf(N, M, ou_tr, ov_tr, nbu, nbv, nbul, nbvl, K, y0_tr, maxCnt, gam, conv, R, lam, ou_te, ov_te, y0_te, k_te, mean, std);\n\t}\n\telse\n\t{\n\t\tstd::tie(u, v, cnt, rmse_arr) = approx_cbmf(N, M, ou_tr, ov_tr, nbu, nbv, nbul, nbvl, K, y0_tr, maxCnt, gam, conv, R, lam, ou_te, ov_te, y0_te, k_te, mean, std);\n\t}\n\tauto end = std::chrono::system_clock::now();\n\n\t\n\tchar gam_char[8];\n\tsprintf(gam_char, \"%.2f\", gam);\n\tchar lam_char[8];\n\tsprintf(lam_char, \"%.5f\", lam);\n\tstd::string filename_base = \"gam=\" + string(gam_char) + \"_lam=\" + string(lam_char) + \"_R=\" + to_string(R) + \"_is_approx=\" + to_string(is_approx);\n\tofstream rmse_f(outpath + filename_base + \"_rmse_approx.txt\");\n\tfor (int i = 0; i < cnt+1; ++i)\n\t{\n\t\tchar rmse_char[16];\n\t\tsprintf(rmse_char, \"%.5f\", 
rmse_arr[i]);\n\t\trmse_f << rmse_char;\n\t\trmse_f << '\\n';\n\t}\n\trmse_f.close();\n\t\n\n\tprintf(\"main end\\n\");\n\t\n\t \n\treturn 0;\n}", "meta": {"hexsha": "0f7d533446c0871221c5ad8cdd1129a0dd80621d", "size": 20196, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "cbmf.cpp", "max_stars_repo_name": "chnoguchi/cbmf", "max_stars_repo_head_hexsha": "3fceb4605b5f682c42606fb067d49efd85a593a6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cbmf.cpp", "max_issues_repo_name": "chnoguchi/cbmf", "max_issues_repo_head_hexsha": "3fceb4605b5f682c42606fb067d49efd85a593a6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cbmf.cpp", "max_forks_repo_name": "chnoguchi/cbmf", "max_forks_repo_head_hexsha": "3fceb4605b5f682c42606fb067d49efd85a593a6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2020-12-28T02:50:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-09T14:21:40.000Z", "avg_line_length": 20.0955223881, "max_line_length": 355, "alphanum_fraction": 0.5051000198, "num_tokens": 7850, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7718434978390747, "lm_q2_score": 0.6477982179521103, "lm_q1q2_score": 0.49999884243807613}} {"text": "\r\n#include \r\n\r\n#include \r\n\r\nNTL_START_IMPL\r\n\r\n\r\nstatic void ExactDiv(ZZ& qq, const ZZ& a, const ZZ& b)\r\n{\r\n NTL_ZZRegister(q);\r\n NTL_ZZRegister(r);\r\n\r\n DivRem(q, r, a, b);\r\n if (!IsZero(r)) {\r\n cerr << \"a = \" << a << \"\\n\";\r\n cerr << \"b = \" << b << \"\\n\";\r\n LogicError(\"ExactDiv: nonzero remainder\");\r\n }\r\n qq = q;\r\n}\r\n\r\n\r\nstatic void BalDiv(ZZ& q, const ZZ& a, const ZZ& d)\r\n\r\n// rounds a/d to nearest integer, breaking ties\r\n// by rounding towards zero. Assumes d > 0.\r\n\r\n{\r\n NTL_ZZRegister(r);\r\n DivRem(q, r, a, d);\r\n\r\n\r\n add(r, r, r);\r\n\r\n long cmp = compare(r, d);\r\n if (cmp > 0 || (cmp == 0 && q < 0))\r\n add(q, q, 1);\r\n}\r\n\r\n\r\n\r\nstatic void MulAddDiv(ZZ& c, const ZZ& c1, const ZZ& c2, \r\n const ZZ& x, const ZZ& y, const ZZ& z)\r\n\r\n// c = (x*c1 + y*c2)/z\r\n\r\n{\r\n NTL_ZZRegister(t1);\r\n NTL_ZZRegister(t2);\r\n\r\n mul(t1, x, c1);\r\n mul(t2, y, c2);\r\n add(t1, t1, t2);\r\n ExactDiv(c, t1, z);\r\n}\r\n\r\n\r\nstatic void MulSubDiv(ZZ& c, const ZZ& c1, const ZZ& c2, \r\n const ZZ& x, const ZZ& y, const ZZ& z)\r\n\r\n// c = (x*c1 - y*c2)/z\r\n\r\n{\r\n NTL_ZZRegister(t1);\r\n NTL_ZZRegister(t2);\r\n\r\n mul(t1, x, c1);\r\n mul(t2, y, c2);\r\n sub(t1, t1, t2);\r\n ExactDiv(c, t1, z);\r\n}\r\n \r\n\r\n\r\n\r\n\r\n#if 0\r\n\r\nstatic void MulSubDiv(vec_ZZ& c, const vec_ZZ& c1, const vec_ZZ& c2,\r\n const ZZ& x, const ZZ& y, const ZZ& z)\r\n\r\n// c = (x*c1 + y*c2)/z\r\n\r\n{\r\n long n = c1.length();\r\n if (c2.length() != n) LogicError(\"MulSubDiv: length mismatch\");\r\n c.SetLength(n);\r\n\r\n long i;\r\n for (i = 1; i <= n; i++) \r\n MulSubDiv(c(i), c1(i), c2(i), x, y, z);\r\n}\r\n\r\n#endif\r\n\r\nstatic void RowTransform(vec_ZZ& c1, vec_ZZ& c2,\r\n const ZZ& x, const ZZ& y, const ZZ& u, const ZZ& v)\r\n\r\n// (c1, c2) = (x*c1 + y*c2, u*c1 + 
v*c2)\r\n\r\n{\r\n long n = c1.length();\r\n if (c2.length() != n) LogicError(\"MulSubDiv: length mismatch\");\r\n NTL_ZZRegister(t1);\r\n NTL_ZZRegister(t2);\r\n NTL_ZZRegister(t3);\r\n NTL_ZZRegister(t4);\r\n\r\n long i;\r\n for (i = 1; i <= n; i++) {\r\n mul(t1, x, c1(i));\r\n mul(t2, y, c2(i));\r\n add(t1, t1, t2);\r\n\r\n mul(t3, u, c1(i));\r\n mul(t4, v, c2(i));\r\n add(t3, t3, t4);\r\n\r\n c1(i) = t1;\r\n c2(i) = t3;\r\n }\r\n}\r\n\r\nstatic void RowTransform(ZZ& c1, ZZ& c2,\r\n const ZZ& x, const ZZ& y, const ZZ& u, const ZZ& v)\r\n\r\n// (c1, c2) = (x*c1 + y*c2, u*c1 + v*c2)\r\n\r\n{\r\n NTL_ZZRegister(t1);\r\n NTL_ZZRegister(t2);\r\n NTL_ZZRegister(t3);\r\n NTL_ZZRegister(t4);\r\n\r\n mul(t1, x, c1);\r\n mul(t2, y, c2);\r\n add(t1, t1, t2);\r\n\r\n mul(t3, u, c1);\r\n mul(t4, v, c2);\r\n add(t3, t3, t4);\r\n\r\n c1 = t1;\r\n c2 = t3;\r\n}\r\n\r\n\r\n\r\nstatic void MulSubFrom(vec_ZZ& c, const vec_ZZ& c2, const ZZ& x)\r\n\r\n// c = c - x*c2\r\n\r\n{\r\n long n = c.length();\r\n if (c2.length() != n) LogicError(\"MulSubFrom: length mismatch\");\r\n\r\n long i;\r\n for (i = 1; i <= n; i++)\r\n MulSubFrom(c(i), c2(i), x);\r\n}\r\n\r\nstatic void MulSubFrom(vec_ZZ& c, const vec_ZZ& c2, long x)\r\n\r\n// c = c - x*c2\r\n\r\n{\r\n long n = c.length();\r\n if (c2.length() != n) LogicError(\"MulSubFrom: length mismatch\");\r\n\r\n long i;\r\n for (i = 1; i <= n; i++)\r\n MulSubFrom(c(i), c2(i), x);\r\n}\r\n\r\n\r\n \r\n \r\n \r\nstatic long SwapTest(const ZZ& d0, const ZZ& d1, const ZZ& d2, const ZZ& lam,\r\n long a, long b)\r\n\r\n// test if a*d1^2 > b*(d0*d2 + lam^2)\r\n\r\n{\r\n NTL_ZZRegister(t1);\r\n NTL_ZZRegister(t2);\r\n\r\n mul(t1, d0, d2);\r\n sqr(t2, lam);\r\n add(t1, t1, t2);\r\n mul(t1, t1, b);\r\n\r\n sqr(t2, d1);\r\n mul(t2, t2, a);\r\n\r\n return t2 > t1;\r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\nstatic\r\nvoid reduce(long k, long l, \r\n mat_ZZ& B, vec_long& P, vec_ZZ& D, \r\n vec_vec_ZZ& lam, mat_ZZ* U)\r\n{\r\n NTL_ZZRegister(t1);\r\n 
NTL_ZZRegister(r);\r\n\r\n if (P(l) == 0) return;\r\n add(t1, lam(k)(P(l)), lam(k)(P(l)));\r\n abs(t1, t1);\r\n if (t1 <= D[P(l)]) return;\r\n\r\n long j;\r\n long rr, small_r;\r\n\r\n BalDiv(r, lam(k)(P(l)), D[P(l)]);\r\n\r\n if (r.WideSinglePrecision()) {\r\n small_r = 1;\r\n rr = to_long(r);\r\n }\r\n else {\r\n small_r = 0;\r\n }\r\n \r\n if (small_r) {\r\n MulSubFrom(B(k), B(l), rr);\r\n\r\n if (U) MulSubFrom((*U)(k), (*U)(l), rr);\r\n\r\n for (j = 1; j <= l-1; j++)\r\n if (P(j) != 0)\r\n MulSubFrom(lam(k)(P(j)), lam(l)(P(j)), rr);\r\n MulSubFrom(lam(k)(P(l)), D[P(l)], rr);\r\n }\r\n else {\r\n MulSubFrom(B(k), B(l), r);\r\n\r\n if (U) MulSubFrom((*U)(k), (*U)(l), r);\r\n\r\n for (j = 1; j <= l-1; j++)\r\n if (P(j) != 0)\r\n MulSubFrom(lam(k)(P(j)), lam(l)(P(j)), r);\r\n MulSubFrom(lam(k)(P(l)), D[P(l)], r);\r\n }\r\n\r\n\r\n}\r\n\r\n\r\nstatic\r\nlong swap(long k, mat_ZZ& B, vec_long& P, vec_ZZ& D, \r\n vec_vec_ZZ& lam, mat_ZZ* U, long m, long verbose)\r\n\r\n// swaps vectors k-1 and k; assumes P(k-1) != 0\r\n// returns 1 if vector k-1 need to be reduced after the swap...\r\n// this only occurs in 'case 2' when there are linear dependencies\r\n\r\n{\r\n long i, j;\r\n NTL_ZZRegister(t1);\r\n NTL_ZZRegister(t2);\r\n NTL_ZZRegister(t3);\r\n NTL_ZZRegister(e);\r\n NTL_ZZRegister(x);\r\n NTL_ZZRegister(y);\r\n\r\n\r\n if (P(k) != 0) {\r\n if (verbose) cerr << \"swap case 1: \" << k << \"\\n\";\r\n\r\n swap(B(k-1), B(k));\r\n if (U) swap((*U)(k-1), (*U)(k));\r\n \r\n for (j = 1; j <= k-2; j++)\r\n if (P(j) != 0)\r\n swap(lam(k-1)(P(j)), lam(k)(P(j)));\r\n\r\n for (i = k+1; i <= m; i++) {\r\n MulAddDiv(t1, lam(i)(P(k)-1), lam(i)(P(k)), \r\n lam(k)(P(k)-1), D[P(k)-2], D[P(k)-1]); \r\n MulSubDiv(t2, lam(i)(P(k)-1), lam(i)(P(k)), \r\n D[P(k)], lam(k)(P(k)-1), D[P(k)-1]);\r\n lam(i)(P(k)-1) = t1;\r\n lam(i)(P(k)) = t2;\r\n }\r\n\r\n MulAddDiv(D[P(k)-1], D[P(k)], lam(k)(P(k)-1),\r\n D[P(k)-2], lam(k)(P(k)-1), D[P(k)-1]);\r\n\r\n return 0;\r\n }\r\n else if 
(!IsZero(lam(k)(P(k-1)))) {\r\n if (verbose) cerr << \"swap case 2: \" << k << \"\\n\";\r\n XGCD(e, x, y, lam(k)(P(k-1)), D[P(k-1)]);\r\n\r\n ExactDiv(t1, lam(k)(P(k-1)), e);\r\n ExactDiv(t2, D[P(k-1)], e);\r\n\r\n t3 = t2;\r\n negate(t2, t2);\r\n RowTransform(B(k-1), B(k), t1, t2, y, x);\r\n if (U) RowTransform((*U)(k-1), (*U)(k), t1, t2, y, x);\r\n for (j = 1; j <= k-2; j++)\r\n if (P(j) != 0)\r\n RowTransform(lam(k-1)(P(j)), lam(k)(P(j)), t1, t2, y, x);\r\n\r\n sqr(t2, t2);\r\n ExactDiv(D[P(k-1)], D[P(k-1)], t2);\r\n\r\n for (i = k+1; i <= m; i++)\r\n if (P(i) != 0) {\r\n ExactDiv(D[P(i)], D[P(i)], t2);\r\n for (j = i+1; j <= m; j++) {\r\n ExactDiv(lam(j)(P(i)), lam(j)(P(i)), t2);\r\n }\r\n }\r\n\r\n for (i = k+1; i <= m; i++) {\r\n ExactDiv(lam(i)(P(k-1)), lam(i)(P(k-1)), t3);\r\n }\r\n\r\n swap(P(k-1), P(k));\r\n\r\n return 1;\r\n }\r\n else {\r\n if (verbose) cerr << \"swap case 3: \" << k << \"\\n\";\r\n\r\n swap(B(k-1), B(k));\r\n if (U) swap((*U)(k-1), (*U)(k));\r\n \r\n for (j = 1; j <= k-2; j++)\r\n if (P(j) != 0)\r\n swap(lam(k-1)(P(j)), lam(k)(P(j)));\r\n\r\n swap(P(k-1), P(k));\r\n\r\n return 0;\r\n }\r\n}\r\n\r\n \r\n\r\n\r\nstatic\r\nvoid IncrementalGS(mat_ZZ& B, vec_long& P, vec_ZZ& D, vec_vec_ZZ& lam, \r\n long& s, long k)\r\n{\r\n long n = B.NumCols();\r\n long m = B.NumRows();\r\n\r\n NTL_ZZRegister(u);\r\n NTL_ZZRegister(t1);\r\n NTL_ZZRegister(t2);\r\n\r\n long i, j;\r\n\r\n for (j = 1; j <= k-1; j++) {\r\n long posj = P(j);\r\n if (posj == 0) continue;\r\n\r\n InnerProduct(u, B(k), B(j));\r\n for (i = 1; i <= posj-1; i++) {\r\n mul(t1, D[i], u);\r\n mul(t2, lam(k)(i), lam(j)(i));\r\n sub(t1, t1, t2);\r\n div(t1, t1, D[i-1]);\r\n u = t1;\r\n }\r\n\r\n lam(k)(posj) = u;\r\n }\r\n\r\n InnerProduct(u, B(k), B(k));\r\n for (i = 1; i <= s; i++) {\r\n mul(t1, D[i], u);\r\n mul(t2, lam(k)(i), lam(k)(i));\r\n sub(t1, t1, t2);\r\n div(t1, t1, D[i-1]);\r\n u = t1;\r\n }\r\n\r\n if (u == 0) {\r\n P(k) = 0;\r\n }\r\n else {\r\n s++;\r\n P(k) = s;\r\n D[s] 
= u;\r\n }\r\n}\r\n\r\n\r\nstatic\r\nlong LLL(vec_ZZ& D, mat_ZZ& B, mat_ZZ* U, long a, long b, long verbose)\r\n{\r\n long m = B.NumRows();\r\n long n = B.NumCols();\r\n\r\n long force_reduce = 1;\r\n\r\n vec_long P;\r\n P.SetLength(m);\r\n\r\n D.SetLength(m+1);\r\n D[0] = 1;\r\n\r\n vec_vec_ZZ lam;\r\n\r\n lam.SetLength(m);\r\n\r\n long j;\r\n for (j = 1; j <= m; j++)\r\n lam(j).SetLength(m);\r\n\r\n if (U) ident(*U, m);\r\n\r\n long s = 0;\r\n\r\n long k = 1;\r\n long max_k = 0;\r\n\r\n\r\n while (k <= m) {\r\n if (k > max_k) {\r\n IncrementalGS(B, P, D, lam, s, k);\r\n max_k = k;\r\n }\r\n\r\n if (k == 1) {\r\n force_reduce = 1; \r\n k++;\r\n continue;\r\n }\r\n\r\n if (force_reduce)\r\n for (j = k-1; j >= 1; j--)\r\n reduce(k, j, B, P, D, lam, U);\r\n\r\n if (P(k-1) != 0 && \r\n (P(k) == 0 || \r\n SwapTest(D[P(k)], D[P(k)-1], D[P(k)-2], lam(k)(P(k)-1), a, b))) {\r\n force_reduce = swap(k, B, P, D, lam, U, max_k, verbose);\r\n k--;\r\n }\r\n else {\r\n force_reduce = 1;\r\n k++;\r\n }\r\n }\r\n\r\n D.SetLength(s+1);\r\n return s;\r\n}\r\n\r\n\r\n\r\nstatic\r\nlong image(ZZ& det, mat_ZZ& B, mat_ZZ* U, long verbose)\r\n{\r\n long m = B.NumRows();\r\n long n = B.NumCols();\r\n\r\n long force_reduce = 1;\r\n\r\n vec_long P;\r\n P.SetLength(m);\r\n\r\n vec_ZZ D;\r\n D.SetLength(m+1);\r\n D[0] = 1;\r\n\r\n vec_vec_ZZ lam;\r\n\r\n lam.SetLength(m);\r\n\r\n long j;\r\n for (j = 1; j <= m; j++)\r\n lam(j).SetLength(m);\r\n\r\n if (U) ident(*U, m);\r\n\r\n long s = 0;\r\n\r\n long k = 1;\r\n long max_k = 0;\r\n\r\n\r\n while (k <= m) {\r\n if (k > max_k) {\r\n IncrementalGS(B, P, D, lam, s, k);\r\n max_k = k;\r\n }\r\n\r\n if (k == 1) {\r\n force_reduce = 1; \r\n k++;\r\n continue;\r\n }\r\n\r\n if (force_reduce)\r\n for (j = k-1; j >= 1; j--) \r\n reduce(k, j, B, P, D, lam, U);\r\n\r\n if (P(k-1) != 0 && P(k) == 0) {\r\n force_reduce = swap(k, B, P, D, lam, U, max_k, verbose);\r\n k--;\r\n }\r\n else {\r\n force_reduce = 1;\r\n k++;\r\n }\r\n }\r\n\r\n det = D[s];\r\n 
return s;\r\n}\r\n\r\nlong LLL(ZZ& det, mat_ZZ& B, mat_ZZ& U, long verbose)\r\n{\r\n vec_ZZ D;\r\n long s;\r\n s = LLL(D, B, &U, 3, 4, verbose);\r\n det = D[s];\r\n return s;\r\n}\r\n\r\nlong LLL(ZZ& det, mat_ZZ& B, long verbose)\r\n{\r\n vec_ZZ D;\r\n long s;\r\n s = LLL(D, B, 0, 3, 4, verbose);\r\n det = D[s];\r\n return s;\r\n}\r\n\r\nlong LLL(ZZ& det, mat_ZZ& B, mat_ZZ& U, long a, long b, long verbose)\r\n{\r\n if (a <= 0 || b <= 0 || a > b || b/4 >= a) LogicError(\"LLL: bad args\");\r\n\r\n vec_ZZ D;\r\n long s;\r\n s = LLL(D, B, &U, a, b, verbose);\r\n det = D[s];\r\n return s;\r\n}\r\n\r\nlong LLL(ZZ& det, mat_ZZ& B, long a, long b, long verbose)\r\n{\r\n if (a <= 0 || b <= 0 || a > b || b/4 >= a) LogicError(\"LLL: bad args\");\r\n\r\n vec_ZZ D;\r\n long s;\r\n s = LLL(D, B, 0, a, b, verbose);\r\n det = D[s];\r\n return s;\r\n}\r\n\r\n\r\nlong LLL_plus(vec_ZZ& D_out, mat_ZZ& B, mat_ZZ& U, long verbose)\r\n{\r\n vec_ZZ D;\r\n long s;\r\n s = LLL(D, B, &U, 3, 4, verbose);\r\n D_out = D;\r\n return s;\r\n}\r\n\r\nlong LLL_plus(vec_ZZ& D_out, mat_ZZ& B, long verbose)\r\n{\r\n vec_ZZ D;\r\n long s;\r\n s = LLL(D, B, 0, 3, 4, verbose);\r\n D_out = D;\r\n return s;\r\n}\r\n\r\nlong LLL_plus(vec_ZZ& D_out, mat_ZZ& B, mat_ZZ& U, long a, long b, long verbose)\r\n{\r\n if (a <= 0 || b <= 0 || a > b || b/4 >= a) LogicError(\"LLL_plus: bad args\");\r\n\r\n vec_ZZ D;\r\n long s;\r\n s = LLL(D, B, &U, a, b, verbose);\r\n D_out = D;\r\n return s;\r\n}\r\n\r\nlong LLL_plus(vec_ZZ& D_out, mat_ZZ& B, long a, long b, long verbose)\r\n{\r\n if (a <= 0 || b <= 0 || a > b || b/4 >= a) LogicError(\"LLL_plus: bad args\");\r\n\r\n vec_ZZ D;\r\n long s;\r\n s = LLL(D, B, 0, a, b, verbose);\r\n D_out = D;\r\n return s;\r\n}\r\n\r\n\r\nlong image(ZZ& det, mat_ZZ& B, mat_ZZ& U, long verbose)\r\n{\r\n return image(det, B, &U, verbose);\r\n}\r\n\r\nlong image(ZZ& det, mat_ZZ& B, long verbose)\r\n{\r\n return image(det, B, 0, verbose);\r\n}\r\n\r\nlong LatticeSolve(vec_ZZ& x, const mat_ZZ& 
A, const vec_ZZ& y, long reduce)\r\n{\r\n long n = A.NumRows();\r\n long m = A.NumCols();\r\n\r\n if (y.length() != m)\r\n LogicError(\"LatticeSolve: dimension mismatch\");\r\n\r\n if (reduce < 0 || reduce > 2)\r\n LogicError(\"LatticeSolve: bad reduce parameter\");\r\n\r\n if (IsZero(y)) {\r\n x.SetLength(n);\r\n clear(x);\r\n return 1;\r\n }\r\n\r\n mat_ZZ A1, U1;\r\n ZZ det2;\r\n long im_rank, ker_rank;\r\n\r\n A1 = A;\r\n\r\n im_rank = image(det2, A1, U1);\r\n ker_rank = n - im_rank;\r\n\r\n mat_ZZ A2, U2;\r\n long new_rank;\r\n long i;\r\n\r\n A2.SetDims(im_rank + 1, m);\r\n for (i = 1; i <= im_rank; i++)\r\n A2(i) = A1(ker_rank + i);\r\n\r\n A2(im_rank + 1) = y;\r\n\r\n new_rank = image(det2, A2, U2);\r\n\r\n if (new_rank != im_rank || \r\n (U2(1)(im_rank+1) != 1 && U2(1)(im_rank+1) != -1))\r\n return 0;\r\n\r\n vec_ZZ x1;\r\n x1.SetLength(im_rank);\r\n\r\n for (i = 1; i <= im_rank; i++)\r\n x1(i) = U2(1)(i);\r\n\r\n if (U2(1)(im_rank+1) == 1)\r\n negate(x1, x1);\r\n\r\n vec_ZZ x2, tmp;\r\n x2.SetLength(n);\r\n clear(x2);\r\n tmp.SetLength(n);\r\n\r\n for (i = 1; i <= im_rank; i++) {\r\n mul(tmp, U1(ker_rank+i), x1(i));\r\n add(x2, x2, tmp);\r\n }\r\n\r\n if (reduce == 0) {\r\n x = x2;\r\n return 1;\r\n }\r\n else if (reduce == 1) {\r\n U1.SetDims(ker_rank+1, n);\r\n U1(ker_rank+1) = x2;\r\n image(det2, U1);\r\n\r\n x = U1(ker_rank + 1);\r\n return 1;\r\n }\r\n else if (reduce == 2) {\r\n U1.SetDims(ker_rank, n);\r\n LLL(det2, U1);\r\n U1.SetDims(ker_rank+1, n);\r\n U1(ker_rank+1) = x2;\r\n image(det2, U1);\r\n\r\n x = U1(ker_rank + 1);\r\n return 1;\r\n }\r\n\r\n return 0;\r\n} \r\n\r\n\r\n\r\nNTL_END_IMPL\r\n", "meta": {"hexsha": "d511deefd1d4b3b95b0e526fc98d83c2d102f668", "size": 13793, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "WinNTL-8_1_2/src/LLL.cpp", "max_stars_repo_name": "Brainloop-Security/secret-sharing", "max_stars_repo_head_hexsha": "56cd3bc808c666b653cbe2b2a5fb2cb9fe760cdd", "max_stars_repo_licenses": ["BSD-2-Clause"], 
"max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "WinNTL-8_1_2/src/LLL.cpp", "max_issues_repo_name": "Brainloop-Security/secret-sharing", "max_issues_repo_head_hexsha": "56cd3bc808c666b653cbe2b2a5fb2cb9fe760cdd", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "WinNTL-8_1_2/src/LLL.cpp", "max_forks_repo_name": "Brainloop-Security/secret-sharing", "max_forks_repo_head_hexsha": "56cd3bc808c666b653cbe2b2a5fb2cb9fe760cdd", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.5091937765, "max_line_length": 81, "alphanum_fraction": 0.4410208077, "num_tokens": 5039, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7718434978390747, "lm_q2_score": 0.6477982179521103, "lm_q1q2_score": 0.49999884243807613}} {"text": "// In debug builds define\r\n// #define GSL_THROW_ON_CONTRACT_VIOLATION\r\n// In release builds define\r\n// #define GSL_UNENFORCED_ON_CONTRACT_VIOLATION // Segmentation fault on GCC\r\n// or use assert() and define / undefine NDEBUG\r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#if !defined(NDEBUG)\r\n#define GSL_THROW_ON_CONTRACT_VIOLATION\r\n#include \r\n#else\r\n#define Expects(cond)\r\n#define Ensures(cond)\r\n#endif\r\n\r\nusing std::cout; using std::wcout; using std::endl;\r\nusing namespace std::string_literals;\r\n\r\n\r\n// User-defined literals\r\nclass Probability\r\n{\r\npublic:\r\n constexpr explicit Probability(long double v)\r\n : value(v)\r\n {\r\n Expects(v < 1.0 && 0.0 <= v);\r\n // assert(v < 1.0 && 0.0 <= v);\r\n }\r\nprivate:\r\n long double value;\r\n\r\n friend constexpr Probability operator \"\" _prob(long double v);\r\n friend std::ostream& operator<<(std::ostream& os, const Probability& prob)\r\n { return os << \"Probability{\" << prob.value << \"}\"; }\r\n};\r\n\r\nclass BadProbability : public std::logic_error\r\n{ \r\npublic:\r\n explicit BadProbability(long double v)\r\n : std::logic_error( \"BadProbability exception, got value: \" + std::to_string(v) )\r\n { }\r\n};\r\n\r\nconstexpr Probability operator \"\" _prob(long double v)\r\n{\r\n return 1.0 < v ? 
throw BadProbability{v} : Probability{v};\r\n // literals never represent negative values - no need to check for v < 0.0\r\n}\r\n\r\nusing boost::typeindex::type_id_with_cvr;\r\nint main()\r\ntry{\r\n // std::basic_string\r\n auto s1 = \"String literal\"s; // this is a std::string;\r\n auto s2 = L\"Wide string literal\"s; // this is a std::wstring;\r\n auto s3 = std::basic_string(L\"This is a very explicit wstring\");\r\n cout << \"s1:\\n\\t\" << s1 << \"\\n\\ttypeid \" << type_id_with_cvr().pretty_name() << endl;\r\n wcout << L\"s2:\\n\\t\" << s2 << L\"\\n\\ttypeid \";\r\n cout << type_id_with_cvr().pretty_name() << endl;\r\n wcout << L\"s3:\\n\\t\" << s3 << L\"\\n\\ttypeid \";\r\n cout << type_id_with_cvr().pretty_name() << endl;\r\n\r\n cout << \"\\nUser defined literals\\n\" << \"Probability\\n\" << endl;\r\n \r\n // constexpr auto prob1 = 1.2_prob; // compiletime error;\r\n\r\n // auto prob2 = 1.2_prob; // NOT a compiletimer error - throws at runtime\r\n\r\n constexpr auto prob3 = 0.3_prob; // OK\r\n cout << prob3 << endl;\r\n\r\n // constexpr auto prob4 = Probability{1.2}; // Compiletime error\r\n\r\n constexpr auto prob5 = Probability{0.5};\r\n cout << prob5 << endl;\r\n\r\n auto prob6 = 0.6_prob;\r\n cout << prob6 << endl;\r\n\r\n // auto prob7 = Probability{1.7}; // runtime error\r\n // cout << prob7 << endl;\r\n}\r\ncatch( const std::exception& e )\r\n{\r\n std::cerr << e.what () << endl;\r\n}", "meta": {"hexsha": "15ff708538d774362a16fb221b62a5ef78ab1ac4", "size": 2861, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Patterns/Misc/literals.cpp", "max_stars_repo_name": "kant/Always-be-learning", "max_stars_repo_head_hexsha": "7c3b3b4f5e8f0dfcb4d8f4b7f7428d5c8ab164c5", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Patterns/Misc/literals.cpp", "max_issues_repo_name": "kant/Always-be-learning", 
"max_issues_repo_head_hexsha": "7c3b3b4f5e8f0dfcb4d8f4b7f7428d5c8ab164c5", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Patterns/Misc/literals.cpp", "max_forks_repo_name": "kant/Always-be-learning", "max_forks_repo_head_hexsha": "7c3b3b4f5e8f0dfcb4d8f4b7f7428d5c8ab164c5", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.097826087, "max_line_length": 104, "alphanum_fraction": 0.6155190493, "num_tokens": 764, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.6477982179521103, "lm_q2_score": 0.7718434873426302, "lm_q1q2_score": 0.49999883563849806}} {"text": "#include \"generator.hpp\"\n\n#include \n#include \n#include \n\n#include \n\nnamespace crossbow {\n\nCrossbowRandomGenerator::CrossbowRandomGenerator (unsigned int seed) {\n\n\tthis->seed = seed;\n\tthis->rng = new rng_t(seed);\n}\n\nfloat CrossbowRandomGenerator::nextafter (const float value) {\n\n\treturn boost::math::nextafter(value, std::numeric_limits::max());\n}\n\nvoid CrossbowRandomGenerator::randomUniformFill (float *buffer, const int count, const float start, const float end) {\n\n\tif (buffer == NULL) {\n\t\tfprintf(stderr, \"error: buffer to fill must not be null\\n\");\n\t\texit (1);\n\t}\n\n\tif (count < 0) {\n\t\tfprintf(stderr, \"error: number of buffer elements to fill must be greater or equal to 0\\n\");\n\t\texit (1);\n\t}\n\n\tif (start > end) {\n\t\tfprintf(stderr, \"error: invalid uniform random distribution specification\\n\");\n\t\texit (1);\n\t}\n\n\tboost::uniform_real dist (start, nextafter (end));\n\tboost::variate_generator > variate_generator (this->rng, dist);\n\n\tfor (int i = 0; i < count; ++i)\n\t\tbuffer [i] = variate_generator ();\n\n\treturn;\n}\n\nvoid 
CrossbowRandomGenerator::randomGaussianFill (float *buffer, const int count, const float mean, const float std, const int truncate) {\n\n\tif (buffer == NULL) {\n\t\tfprintf(stderr, \"error: buffer to fill must not be null\\n\");\n\t\texit (1);\n\t}\n\n\tif (count < 0) {\n\t\tfprintf(stderr, \"error: number of buffer elements to fill must be greater or equal to 0\\n\");\n\t\texit (1);\n\t}\n\n\tif (std <= 0) {\n\t\tfprintf(stderr, \"error: invalid normal distribution specification\\n\");\n\t\texit (1);\n\t}\n\n\tboost::normal_distribution dist (mean, std);\n\tboost::variate_generator > variate_generator (this->rng, dist);\n\t\n\t/* float checksum = 0; */\n\n\tif (truncate) {\n\n\t\t/*\n\t\t * Added on 14 Apr 2018: Hard-coded truncated version:\n\t\t *\n\t\t * Values whose magnitude is more than 2 standard deviations\n\t\t * from the mean are dropped and re-picked.\n\t\t */\n\t\tfloat min = mean - (2 * std);\n\t\tfloat max = mean + (2 * std);\n\t\tint maxiterations = 100;\n\t\n\t\tfloat sample;\n\t\tint correct = 0;\n\t\tfor (int i = 0; i < count; ++i) {\n\t\t\tfor (int j = 0; j < maxiterations; ++j) {\n\t\t\t\tsample = variate_generator ();\n\t\t\t\tif ((sample > min) && (sample < max)) {\n\t\t\t\t\tcorrect ++;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuffer [i] = sample;\n\t\t\t/* checksum += buffer [i]; */\n\t\t}\n\t\tif (correct < count)\n\t\t\tfprintf(stderr, \"warning: only %d out of %d values truncated\\n\", correct, count);\n\t}\n\telse {\n\t\tfor (int i = 0; i < count; ++i) {\n\t\t\tbuffer [i] = variate_generator ();\n\t\t\t/* checksum += buffer [i]; */\n\t\t}\n\t}\n\t/*\n\t * fprintf(stdout, \"[DBG] checksum is %.5f\\n\", checksum);\n\t * fflush (stdout);\n\t */\n\n\treturn;\n}\n\nvoid CrossbowRandomGenerator::dump () {\n\n\tfprintf(stdout, \"CrossbowRandom (%du)\\n\", seed);\n\tfflush (stdout);\n}\n\n} /* namespace crossbow */\n", "meta": {"hexsha": "2f41febaaa7e79e2318a4a0d8ecf26f27de5f936", "size": 2962, "ext": "cpp", "lang": "C++", 
"max_stars_repo_path": "clib-multigpu/random/generator.cpp", "max_stars_repo_name": "lsds/Crossbow", "max_stars_repo_head_hexsha": "d4441b35315f9f7d48293fe81faaf21e1ca48002", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 50.0, "max_stars_repo_stars_event_min_datetime": "2019-01-09T14:30:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T11:42:51.000Z", "max_issues_repo_path": "clib-multigpu/random/generator.cpp", "max_issues_repo_name": "lsds/Crossbow", "max_issues_repo_head_hexsha": "d4441b35315f9f7d48293fe81faaf21e1ca48002", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6.0, "max_issues_repo_issues_event_min_datetime": "2019-01-18T07:31:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-14T21:16:53.000Z", "max_forks_repo_path": "clib-multigpu/random/generator.cpp", "max_forks_repo_name": "lsds/Crossbow", "max_forks_repo_head_hexsha": "d4441b35315f9f7d48293fe81faaf21e1ca48002", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2019-03-20T14:56:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T06:13:09.000Z", "avg_line_length": 24.8907563025, "max_line_length": 138, "alphanum_fraction": 0.6576637407, "num_tokens": 812, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7718434978390747, "lm_q2_score": 0.6477982043529715, "lm_q1q2_score": 0.4999988319416692}} {"text": "#include \"third_party/frc971/control_loops/paths/path.h\"\n#include \n\n#define _USE_MATH_DEFINES\n#include \n\n#include \"muan/logging/logger.h\"\n\nnamespace frc971 {\nnamespace control_loops {\nnamespace paths {\n\nEigen::Vector2d Projection(Eigen::Vector2d a, Eigen::Vector2d direction) {\n return a.dot(direction) * direction.dot(direction) * direction;\n}\n\nPosition FromMagDirection(double magnitude, double direction) {\n return magnitude * (Position() << ::std::cos(direction), ::std::sin(direction)).finished();\n}\n\nPose::Pose(Eigen::Vector3d values) : values_(values) {}\n\nPose::Pose(Position pos, double theta) {\n values_.block<2, 1>(0, 0) = pos;\n values_(2) = remainder(theta, 2 * M_PI);\n}\n\nPose Pose::operator+(const Pose &other) const {\n Eigen::Vector3d new_values = values_ + other.values_;\n\n // Wrap the heading into [-pi, pi]\n new_values(2) = remainder(new_values(2), 2 * M_PI);\n\n return Pose(new_values);\n}\n\nPose Pose::TranslateBy(const Position &delta) const {\n Eigen::Vector3d new_values = values_;\n new_values.block<2, 1>(0, 0) += delta;\n return Pose(new_values);\n}\n\nPose Pose::RotateBy(double theta) const {\n Eigen::Vector3d new_values = values_;\n new_values.block<2, 1>(0, 0) = Eigen::Rotation2D(theta) * new_values.block<2, 1>(0, 0);\n\n // Wrap the heading into [-pi, pi]\n new_values(2) = remainder(new_values(2) + theta, 2 * M_PI);\n\n return Pose(new_values);\n}\n\nPose Pose::operator-(const Pose &other) const {\n Eigen::Vector3d new_values = values_ - other.values_;\n\n // Wrap the heading into [-pi, pi]\n new_values(2) = remainder(new_values(2), 2 * M_PI);\n return Pose(new_values);\n}\n\nPose Pose::Compose(const Pose &other) const { return other.RotateBy(heading()).TranslateBy(translational()); }\n\nHermitePath::HermitePath(Pose initial, Pose final,\n double initial_velocity, double final_velocity, bool 
backwards,\n double extra_distance_initial, double extra_distance_final,\n double initial_angular_velocity,\n double final_angular_velocity)\n : HermitePath(initial.translational(),\n FromMagDirection(1, initial.heading()),\n final.translational(),\n FromMagDirection(1, final.heading()),\n initial_velocity, final_velocity, backwards,\n extra_distance_initial, extra_distance_final,\n initial_angular_velocity, final_angular_velocity) {}\n\nHermitePath::HermitePath(Position initial_position, Eigen::Vector2d initial_tangent,\n Position final_position, Eigen::Vector2d final_tangent,\n double initial_velocity, double final_velocity, bool backwards,\n double extra_distance_initial, double extra_distance_final,\n double initial_angular_velocity,\n double final_angular_velocity) {\n backwards_ = backwards;\n\n Eigen::Vector2d initial_derivative_basis;\n Eigen::Vector2d final_derivative_basis;\n double initial_deriv_magnitude;\n double final_deriv_magnitude;\n\n {\n Eigen::Vector2d distance = final_position - initial_position;\n // How far to the side are we driving, relative to initial state?\n Eigen::Vector2d sideways = distance - Projection(distance, initial_tangent);\n // How far to the front is the sideways vector, relative to final state?\n double forwards2 = sideways.dot(final_tangent);\n if (backwards) {\n forwards2 *= -1;\n }\n // Rough estimate of the curvature, this should be approximately\n // correct as long as the curvature doesn't have excessively large changes.\n double approx_curve = (sideways.norm() * 2 - forwards2) / distance.norm();\n // Standard hermite spline uses tangent * |distance|, but initial velocity\n // should be taken into account as well, although only sigmificantly if\n // distance is short and initial velocity is high. 
This formula was found\n // experimentally.\n initial_deriv_magnitude = distance.norm() + extra_distance_initial * 5.0 +\n initial_velocity * initial_velocity * 0.5 * approx_curve * approx_curve;\n final_deriv_magnitude = distance.norm() + extra_distance_final * 5.0 +\n final_velocity * final_velocity * 0.5 * approx_curve * approx_curve;\n }\n\n initial_derivative_basis = initial_tangent * initial_deriv_magnitude;\n final_derivative_basis = final_tangent * final_deriv_magnitude;\n\n if (backwards_) {\n initial_derivative_basis *= -1;\n final_derivative_basis *= -1;\n }\n\n Eigen::Vector2d initial_acceleration = Eigen::Vector2d::Zero();\n if (initial_velocity > 0.01 || initial_velocity < -0.01) {\n initial_acceleration =\n (Eigen::Vector2d() << -initial_tangent(1), initial_tangent(0)).finished() *\n initial_deriv_magnitude * final_deriv_magnitude *\n initial_angular_velocity / initial_velocity;\n } else if (initial_angular_velocity > 0.01 || initial_angular_velocity < -0.01) {\n LOG(WARNING, \"Initial velocity required if initial angular velocity present\"\n \" (v_0 = %f, omega_0 = %f, cutoff = 0.01)\",\n initial_velocity, initial_angular_velocity);\n }\n\n Eigen::Vector2d final_acceleration = Eigen::Vector2d::Zero();\n if (final_velocity > 0.01 || final_velocity < -0.01) {\n final_acceleration =\n (Eigen::Vector2d() << -final_tangent(1), final_tangent(0)).finished() *\n final_deriv_magnitude * final_deriv_magnitude *\n final_angular_velocity / final_velocity;\n } else if (final_angular_velocity > 0.01 || final_angular_velocity < -0.01) {\n LOG(WARNING, \"Final velocity required if final angular velocity present\"\n \" (v_f = %f, omega_f = %f, cutoff = 0.01)\",\n final_velocity, final_angular_velocity);\n }\n\n coefficients_ = Eigen::Matrix::Zero();\n coefficients_.block<2, 1>(0, 0) = initial_position;\n coefficients_.block<2, 1>(0, 1) = initial_derivative_basis;\n coefficients_.block<2, 1>(0, 2) = 0.5 * initial_acceleration;\n coefficients_.block<2, 1>(0, 3) =\n 
-10 * initial_position - 6 * initial_derivative_basis +\n -1.5 * initial_acceleration + 0.5 * final_acceleration +\n -4 * final_derivative_basis + 10 * final_position;\n coefficients_.block<2, 1>(0, 4) =\n 15 * initial_position + 8 * initial_derivative_basis +\n 1.5 * initial_acceleration - 1 * final_acceleration +\n 7 * final_derivative_basis - 15 * final_position;\n coefficients_.block<2, 1>(0, 5) =\n -6 * initial_position - 3 * initial_derivative_basis +\n -0.5 * initial_acceleration + 0.5 * final_acceleration +\n -3 * final_derivative_basis + 6 * final_position;\n\n for (int i = 0; i < 6; i++) {\n coefficients_.block<2, 1>(2, i) = coefficients_.block<2, 1>(0, i) * i;\n }\n\n initial_heading_ = remainder(::std::atan2(initial_tangent(1), initial_tangent(0)), 2 * M_PI);\n}\n\nvoid HermitePath::Populate(double s_min, double s_max, Pose *pose_arr, size_t arr_len) const {\n Eigen::Matrix s_polynomial_bases;\n double step = (s_max - s_min) / (arr_len - 1);\n for (size_t i = 0; i < arr_len; i++) {\n double s = s_min + i * step;\n s_polynomial_bases << 1.0, s, s * s, s * s * s, s * s * s * s, s * s * s * s * s;\n\n Eigen::Vector4d combined = coefficients_ * s_polynomial_bases;\n\n double theta;\n if (s == 0) {\n // When s is _exactly_ zero, we can't get the heading directly from\n // the derivative (because it collapses to zero)! 
Let's use the cached\n // value instead.\n theta = initial_heading_;\n } else {\n theta = ::std::atan2(combined(3), combined(2));\n if (backwards_) {\n if (theta > 0) {\n theta -= M_PI;\n } else {\n theta += M_PI;\n }\n }\n }\n\n pose_arr[i] = Pose(combined.block<2, 1>(0, 0), theta);\n }\n}\n\n} // namespace paths\n} // namespace control_loops\n} // namespace frc971\n", "meta": {"hexsha": "dd91c963d33f365d5969328e46dda4ede086733c", "size": 7951, "ext": "cc", "lang": "C++", "max_stars_repo_path": "third_party/frc971/control_loops/paths/path.cc", "max_stars_repo_name": "hansonl02/frc-robot-code", "max_stars_repo_head_hexsha": "4b120c917a7709df9f010c9089a87c320bab3a16", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 61.0, "max_stars_repo_stars_event_min_datetime": "2017-01-22T04:38:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T00:04:37.000Z", "max_issues_repo_path": "third_party/frc971/control_loops/paths/path.cc", "max_issues_repo_name": "hansonl02/frc-robot-code", "max_issues_repo_head_hexsha": "4b120c917a7709df9f010c9089a87c320bab3a16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2018-06-28T05:34:57.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-16T15:46:22.000Z", "max_forks_repo_path": "third_party/frc971/control_loops/paths/path.cc", "max_forks_repo_name": "hansonl02/frc-robot-code", "max_forks_repo_head_hexsha": "4b120c917a7709df9f010c9089a87c320bab3a16", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 17.0, "max_forks_repo_forks_event_min_datetime": "2017-05-12T15:32:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T12:49:38.000Z", "avg_line_length": 39.755, "max_line_length": 110, "alphanum_fraction": 0.6649478053, "num_tokens": 2037, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8128673269042768, "lm_q2_score": 0.6150878555160665, "lm_q1q2_score": 0.499984820924629}} {"text": "/*\nBSD 3-Clause License\n\nCopyright (c) 2020, The Regents of the University of Minnesota\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*/\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \"opendb/db.h\"\n#include \"ir_solver.h\"\n#include \"node.h\"\n#include \"gmat.h\"\n#include \"get_power.h\"\n#include \"openroad/Error.hh\"\n\nusing ord::error;\nusing ord::warn;\nusing odb::dbBlock;\nusing odb::dbBox;\nusing odb::dbChip;\nusing odb::dbDatabase;\nusing odb::dbInst;\nusing odb::dbNet;\nusing odb::dbSBox;\nusing odb::dbSet;\nusing odb::dbSigType;\nusing odb::dbSWire;\nusing odb::dbTech;\nusing odb::dbTechLayer;\nusing odb::dbTechLayerDir;\nusing odb::dbVia;\nusing odb::dbViaParams;\n\nusing namespace std;\nusing std::vector;\nusing Eigen::Map;\nusing Eigen::VectorXd;\nusing Eigen::SparseMatrix; \nusing Eigen::SparseLU;\nusing Eigen::Success;\n\n\n//! Returns the created G matrix for the design\n/*\n * \\return G Matrix\n */\nGMat* IRSolver::GetGMat()\n{\n return m_Gmat;\n}\n\n\n//! Returns current map represented as a 1D vector\n/* \n * \\return J vector\n */\nvector IRSolver::GetJ()\n{\n return m_J;\n}\n\n\n//! 
Function to solve for voltage using SparseLU \nvoid IRSolver::SolveIR()\n{\n if(!m_connection) {\n cout<<\"WARNING: Powergrid is not connected to all instances,\"<<\n \"IR Solver may not be accurate, LVS may also fail.\"<getTech())->getDbUnitsPerMicron();\n clock_t t1, t2;\n CscMatrix* Gmat = m_Gmat->GetGMat();\n// fill A\n int nnz = Gmat->nnz;\n int m = Gmat->num_rows;\n int n = Gmat->num_cols;\n double* values = &(Gmat->values[0]);\n int* row_idx = &(Gmat->row_idx[0]);\n int* col_ptr = &(Gmat->col_ptr[0]);\n Map > A( Gmat->num_rows,\n Gmat->num_cols,\n Gmat->nnz,\n col_ptr, // read-write\n row_idx,\n values);\n \n\n vector J = GetJ();\n Map b(J.data(),J.size());\n VectorXd x;\n SparseLU > solver;\n cout << \"INFO: Factorizing G\" << endl;\n solver.compute(A);\n if(solver.info()!=Success) {\n // decomposition failed\n cout<<\"Error: LU factorization of GMatrix failed\"<GetNumNodes();\n int node_num =0;\n double sum_volt = 0;\n wc_voltage = vdd;\n while(node_num < num_nodes) {\n Node* node = m_Gmat->GetNode(node_num);\n double volt = x(node_num);\n sum_volt = sum_volt + volt;\n if (volt < wc_voltage) {\n wc_voltage = volt;\n }\n node->SetVoltage(volt);\n node_num++;\n if(node->HasInstances()) {\n NodeLoc node_loc = node->GetLoc();\n float loc_x = ((float)node_loc.first)/((float)unit_micron);\n float loc_y = ((float)node_loc.second)/((float)unit_micron);\n std::vector insts = node->GetInstances();\n std::vector::iterator inst_it;\n if (m_out_file != \"\") {\n for(inst_it = insts.begin();inst_it!=insts.end();inst_it++) {\n ir_report<<(*inst_it)->getName()<<\", \"<AddC4Bump(node_loc, it); // add the 0th bump\n m_J.push_back(voltage); // push back first vdd\n vdd = voltage;\n }\n return true;\n}\n\n\n\n//! 
Function that parses the Vsrc file\nvoid IRSolver::ReadC4Data()\n{\n int unit_micron = (m_db->getTech())->getDbUnitsPerMicron();\n cout << \"INFO: Reading location of VDD and VSS sources \" << endl;\n if(m_vsrc_file != \"\") {\n std::ifstream file(m_vsrc_file);\n std::string line = \"\";\n // Iterate through each line and split the content using delimiter\n while (getline(file, line)) {\n tuple c4_bump;\n int first, second, size;\n double voltage;\n stringstream X(line);\n string val;\n for (int i = 0; i < 4; ++i) {\n getline(X, val, ',');\n if (i == 0) {\n first = (int) (unit_micron * stod(val));\n } else if (i == 1) {\n second = (int) (unit_micron * stod(val));\n } else if (i == 2) {\n size = (int) (unit_micron * stod(val));\n } else {\n voltage = stod(val);\n }\n }\n m_C4Bumps.push_back(make_tuple(first, second, size, voltage));\n }\n file.close();\n }\n else {\n cout << \"Warning: Voltage pad location file not spcified, defaulting pad location to origin\" << endl;\n m_C4Bumps.push_back(make_tuple(0,0,0,0));\n }\n}\n\n\n//! Function that parses the Vsrc file\n/*void IRSolver::ReadResData()\n{\n cout << \"Default resistance file\" << m_def_res << endl;\n cout << \"INFO: Reading resistance of layers and vias \" << endl;\n std::ifstream file(m_def_res); \n std::string line = \"\";\n int line_num = 0;\n // Iterate through each line and split the content using delimiter\n while (getline(file, line)) {\n line_num ++;\n if (line_num == 1) {\n continue;\n }\n //tuple layer_res;\n int routing_level;\n double res_per_unit;\n double res_via;\n stringstream X(line);\n string val;\n for (int i = 0; i < 3; ++i) {\n getline(X, val, ',');\n if (i == 0) {\n routing_level = stoi(val);\n } else if (i == 1) {\n res_per_unit = stod(val);\n } else {\n res_via = stod(val);\n }\n }\n m_layer_res.push_back(make_tuple(routing_level, res_per_unit, res_via));\n }\n file.close();\n}\n*/\n\n//! 
Function to create a J vector from the current map\nbool IRSolver::CreateJ()\n{ // take current_map as an input?\n int num_nodes = m_Gmat->GetNumNodes();\n m_J.resize(num_nodes, 0);\n\n vector> power_report = GetPower();\n dbChip* chip = m_db->getChip();\n dbBlock* block = chip->getBlock();\n for (vector>::iterator it = power_report.begin();\n it != power_report.end();\n ++it) {\n dbInst* inst = block->findInst(it->first.c_str());\n if (inst == NULL) {\n cout << \"Warning instance \" << it->first << \" not found within database\"\n << endl;\n continue;\n }\n int x, y;\n inst->getLocation(x, y);\n //cout << \"Got location\" <GetNode(x, y, l,true);\n NodeLoc node_loc = node_J->GetLoc();\n if( abs(node_loc.first - x) > m_node_density || abs(node_loc.second - y) > m_node_density ){\n cout<<\"WARNING: Instance current node at \"<first <AddCurrentSrc(it->second);\n node_J->AddInstance(inst);\n }\n for (int i = 0; i < num_nodes; ++i) {\n Node* node_J = m_Gmat->GetNode(i);\n m_J[i] = -1 * (node_J->GetCurrent()); // as MNA needs negative\n // cout << m_J[i] < node_vector;\n dbTech* tech = m_db->getTech();\n //dbSet layers = tech->getLayers();\n dbSet::iterator litr;\n int unit_micron = tech->getDbUnitsPerMicron();\n int num_routing_layers = tech->getRoutingLayerCount();\n\n m_Gmat = new GMat(num_routing_layers);\n dbChip* chip = m_db->getChip();\n dbBlock* block = chip->getBlock();\n dbSet nets = block->getNets();\n std::vector vdd_nets;\n std::vector gnd_nets;\n std::vector power_nets;\n int num_wires =0;\n cout << \"Extracting power stripes on net \" << m_power_net <::iterator nIter;\n for (nIter = nets.begin(); nIter != nets.end(); ++nIter) {\n dbNet* curDnet = *nIter;\n dbSigType nType = curDnet->getSigType();\n if(m_power_net == \"VSS\") {\n if (nType == dbSigType::GROUND) {\n power_nets.push_back(curDnet);\n } else {\n continue;\n }\n } else if(m_power_net == \"VDD\") {\n if (nType == dbSigType::POWER) {\n power_nets.push_back(curDnet);\n } else {\n continue;\n }\n } 
else {\n cout << \"Warning: Net not specifed as VDD or VSS. Power grid checker is not run.\" <::iterator vIter;\n for (vIter = power_nets.begin(); vIter != power_nets.end(); ++vIter) {\n dbNet* curDnet = *vIter;\n dbSet swires = curDnet->getSWires();\n dbSet::iterator sIter;\n for (sIter = swires.begin(); sIter != swires.end(); ++sIter) {\n dbSWire* curSWire = *sIter;\n dbSet wires = curSWire->getWires();\n dbSet::iterator wIter;\n for (wIter = wires.begin(); wIter != wires.end(); ++wIter) {\n num_wires++;\n dbSBox* curWire = *wIter;\n int l;\n dbTechLayerDir::Value layer_dir; \n if (curWire->isVia()) {\n dbVia* via = curWire->getBlockVia();\n dbTechLayer* via_layer = via->getTopLayer();\n l = via_layer->getRoutingLevel();\n layer_dir = via_layer->getDirection();\n } else {\n dbTechLayer* wire_layer = curWire->getTechLayer();\n l = wire_layer->getRoutingLevel();\n layer_dir = wire_layer->getDirection();\n if (l < m_bottom_layer) {\n m_bottom_layer = l ; \n m_bottom_layer_dir = layer_dir;\n }\n }\n if (l > m_top_layer) {\n m_top_layer = l ; \n m_top_layer_dir = layer_dir;\n }\n }\n }\n }\n cout<<\"Creating Nodes: \";\n int progress_wires=0;\n int progress_percent=1;\n for (vIter = power_nets.begin(); vIter != power_nets.end(); ++vIter) {\n dbNet* curDnet = *vIter;\n dbSet swires = curDnet->getSWires();\n dbSet::iterator sIter;\n for (sIter = swires.begin(); sIter != swires.end(); ++sIter) {\n dbSWire* curSWire = *sIter;\n dbSet wires = curSWire->getWires();\n dbSet::iterator wIter;\n for (wIter = wires.begin(); wIter != wires.end(); ++wIter) {\n if(progress_wires >= ((progress_percent/100.0)*num_wires)-1.0 ){\n cout<<\"\\b\\b\\b\\b\"<isVia()) {\n dbVia* via = curWire->getBlockVia();\n dbBox* via_bBox = via->getBBox();\n int check_params = via->hasParams();\n int x_cut_size = 0;\n int y_cut_size = 0;\n int x_bottom_enclosure = 0;\n int y_bottom_enclosure = 0;\n int x_top_enclosure = 0;\n int y_top_enclosure = 0;\n\t\t if(check_params == 1) {\n\t\t dbViaParams 
params;\n\t\t\tvia->getViaParams(params);\n\t\t x_cut_size = params.getXCutSize();\n\t\t y_cut_size = params.getYCutSize();\n x_bottom_enclosure = params.getXBottomEnclosure();\n y_bottom_enclosure = params.getYBottomEnclosure();\n x_top_enclosure = params.getXTopEnclosure();\n y_top_enclosure = params.getYTopEnclosure();\n\t\t }\n BBox bBox = make_pair((via_bBox->getDX()) / 2, (via_bBox->getDY()) / 2);\n int x, y;\n curWire->getViaXY(x, y);\n dbTechLayer* via_layer = via->getBottomLayer();\n dbTechLayerDir::Value layer_dir = via_layer->getDirection();\n int l = via_layer->getRoutingLevel();\n int x_loc1,x_loc2,y_loc1,y_loc2;\n if (m_bottom_layer != l && l != m_top_layer) {//do not set for top and bottom layers\n if (layer_dir == dbTechLayerDir::Value::HORIZONTAL) {\n y_loc1 = y;\n y_loc2 = y;\n x_loc1 = x - (x_bottom_enclosure+x_cut_size/2);\n x_loc2 = x + (x_bottom_enclosure+x_cut_size/2);\n } else {\n y_loc1 = y - (y_bottom_enclosure+y_cut_size/2);\n y_loc2 = y + (y_bottom_enclosure+y_cut_size/2);\n x_loc1 = x;\n x_loc2 = x;\n }\n m_Gmat->SetNode(x_loc1, y_loc1, l, make_pair(0, 0));\n m_Gmat->SetNode(x_loc2, y_loc2, l, make_pair(0, 0));\n m_Gmat->SetNode(x, y, l, bBox);\n }\n via_layer = via->getTopLayer();\n l = via_layer->getRoutingLevel();\n\n //TODO this may count the stripe conductance twice but is needed to\n //fix a staggered stacked via\n layer_dir = via_layer->getDirection();\n if (m_bottom_layer != l && l != m_top_layer) {//do not set for top and bottom layers\n if (layer_dir == dbTechLayerDir::Value::HORIZONTAL) {\n y_loc1 = y;\n y_loc2 = y;\n x_loc1 = x - (x_top_enclosure+x_cut_size/2);\n x_loc2 = x + (x_top_enclosure+x_cut_size/2);\n } else {\n y_loc1 = y - (y_top_enclosure+y_cut_size/2);\n y_loc2 = y + (y_top_enclosure+y_cut_size/2);\n x_loc1 = x;\n x_loc2 = x;\n }\n m_Gmat->SetNode(x_loc1, y_loc1, l, make_pair(0, 0));\n m_Gmat->SetNode(x_loc2, y_loc2, l, make_pair(0, 0));\n m_Gmat->SetNode(x, y, l, bBox);\n }\n } else {\n int x_loc1, x_loc2, 
y_loc1, y_loc2;\n dbTechLayer* wire_layer = curWire->getTechLayer();\n int l = wire_layer->getRoutingLevel();\n dbTechLayerDir::Value layer_dir = wire_layer->getDirection();\n if(l == m_bottom_layer){\n layer_dir = dbTechLayerDir::Value::HORIZONTAL;\n }\n if (layer_dir == dbTechLayerDir::Value::HORIZONTAL) {\n y_loc1 = (curWire->yMin() + curWire->yMax()) / 2;\n y_loc2 = (curWire->yMin() + curWire->yMax()) / 2;\n x_loc1 = curWire->xMin();\n x_loc2 = curWire->xMax();\n } else {\n x_loc1 = (curWire->xMin() + curWire->xMax()) / 2;\n x_loc2 = (curWire->xMin() + curWire->xMax()) / 2;\n y_loc1 = curWire->yMin();\n y_loc2 = curWire->yMax();\n }\n if (l == m_bottom_layer || l == m_top_layer) { // special case for bottom and top layers we design a dense grid\n if (layer_dir == dbTechLayerDir::Value::HORIZONTAL ) {\n int x_i;\n x_loc1 = (x_loc1/m_node_density)*m_node_density; //quantize the horizontal direction\n x_loc2 = (x_loc2/m_node_density)*m_node_density; //quantize the horizontal direction\n for (x_i = x_loc1; x_i <= x_loc2; x_i = x_i + m_node_density) {\n m_Gmat->SetNode(x_i, y_loc1, l, make_pair(0, 0));\n }\n } else {\n y_loc1 = (y_loc1/m_node_density)*m_node_density; //quantize the vertical direction\n y_loc2 = (y_loc2/m_node_density)*m_node_density; //quantize the vertical direction\n int y_i;\n for (y_i = y_loc1; y_i <= y_loc2; y_i = y_i + m_node_density) {\n m_Gmat->SetNode(x_loc1, y_i, l, make_pair(0, 0));\n }\n }\n } else { // add end nodes\n m_Gmat->SetNode(x_loc1, y_loc1, l, make_pair(0, 0));\n m_Gmat->SetNode(x_loc2, y_loc2, l, make_pair(0, 0));\n }\n }\n }\n }\n }\n cout<(m_C4Bumps[it]);\n int y = get<1>(m_C4Bumps[it]);\n int size = get<2>(m_C4Bumps[it]);\n double v = get<3>(m_C4Bumps[it]);\n std::vector RDL_nodes;\n RDL_nodes = m_Gmat->GetRDLNodes(m_top_layer, \n m_top_layer_dir,\n x-size/2, \n x+size/2,\n y-size/2,\n y+size/2);\n if (RDL_nodes.empty() == true) {\n Node* node = m_Gmat->GetNode(x,y,m_top_layer,true);\n NodeLoc node_loc = node->GetLoc();\n 
double new_loc1 = ((double)node_loc.first) /((double) unit_micron);\n double new_loc2 = ((double)node_loc.second) /((double) unit_micron);\n double old_loc1 = ((double)x) /((double) unit_micron);\n double old_loc2 = ((double)y) /((double) unit_micron);\n double old_size = ((double)size) /((double) unit_micron);\n cout<<\"WARNING: Vsrc location at x=\"<GetRDLNodes(m_top_layer, \n m_top_layer_dir,\n node_loc.first-size/2, \n node_loc.first+size/2,\n node_loc.second-size/2,\n node_loc.second+size/2);\n\n }\n vector::iterator node_it;\n for(node_it = RDL_nodes.begin(); node_it != RDL_nodes.end(); ++node_it) {\n Node* node = *node_it;\n m_C4Nodes.push_back(make_pair(node->GetGLoc(),v));\n num_C4++;\n }\n }\n // All new nodes must be inserted by this point\n // initialize G Matrix\n\n cout << \"INFO: Number of nodes on net \" << m_power_net <<\" =\" << m_Gmat->GetNumNodes() << endl;\n cout << \"Creating Connections: \";\n m_Gmat->InitializeGmatDok(num_C4);\n int err_flag_via = 1;\n int err_flag_layer = 1;\n for (vIter = power_nets.begin(); vIter != power_nets.end();\n ++vIter) { // only 1 is expected?\n dbNet* curDnet = *vIter;\n dbSet swires = curDnet->getSWires();\n dbSet::iterator sIter;\n for (sIter = swires.begin(); sIter != swires.end();\n ++sIter) { // only 1 is expected?\n dbSWire* curSWire = *sIter;\n dbSet wires = curSWire->getWires();\n dbSet::iterator wIter;\n for (wIter = wires.begin(); wIter != wires.end(); ++wIter) {\n if(progress_wires >= ((progress_percent/100.0)*num_wires)-1.0 ){\n cout<<\"\\b\\b\\b\\b\"<isVia()) {\n dbVia* via = curWire->getBlockVia();\n int num_via_rows = 1;\n int num_via_cols = 1;\n int check_params = via->hasParams();\n int x_cut_size = 0;\n int y_cut_size = 0;\n int x_bottom_enclosure = 0;\n int y_bottom_enclosure = 0;\n int x_top_enclosure = 0;\n int y_top_enclosure = 0;\n\t\t if(check_params == 1) {\n\t\t dbViaParams params;\n\t\t\tvia->getViaParams(params);\n\t\t num_via_rows = params.getNumCutRows();\n\t\t num_via_cols = 
params.getNumCutCols();\n\t\t x_cut_size = params.getXCutSize();\n\t\t y_cut_size = params.getYCutSize();\n x_bottom_enclosure = params.getXBottomEnclosure();\n y_bottom_enclosure = params.getYBottomEnclosure();\n x_top_enclosure = params.getXTopEnclosure();\n y_top_enclosure = params.getYTopEnclosure();\n\t\t }\n dbBox* via_bBox = via->getBBox();\n BBox bBox\n = make_pair((via_bBox->getDX()) / 2, (via_bBox->getDY()) / 2);\n int x, y;\n curWire->getViaXY(x, y);\n dbTechLayer* via_layer = via->getBottomLayer();\n int l = via_layer->getRoutingLevel();\n\n double R = via_layer->getUpperLayer()->getResistance();\n R = R/(num_via_rows * num_via_cols);\n if (R == 0.0) {\n err_flag_via = 0;\n //R = get<2>(m_layer_res[l]); /// Must figure out via resistance value\n //cout << \"Via Resistance\" << R << endl;\n }\n bool top_or_bottom = ((l == m_bottom_layer) || (l == m_top_layer));\n Node* node_bot = m_Gmat->GetNode(x, y, l,top_or_bottom);\n NodeLoc node_loc = node_bot->GetLoc();\n if( abs(node_loc.first - x) > m_node_density || abs(node_loc.second - y) > m_node_density ){\n cout<<\"WARNING: Node at \"<getTopLayer();\n l = via_layer->getRoutingLevel();\n top_or_bottom = ((l == m_bottom_layer) || (l == m_top_layer));\n Node* node_top = m_Gmat->GetNode(x, y, l,top_or_bottom);\n node_loc = node_top->GetLoc();\n if( abs(node_loc.first - x) > m_node_density || abs(node_loc.second - y) > m_node_density ){\n cout<<\"WARNING: Node at \"<SetConductance(node_bot, node_top, 0);\n } else {\n m_Gmat->SetConductance(node_bot, node_top, 1 / R);\n }\n }\n \n via_layer = via->getBottomLayer();\n dbTechLayerDir::Value layer_dir = via_layer->getDirection();\n l = via_layer->getRoutingLevel();\n if(l != m_bottom_layer) {\n double rho = via_layer->getResistance()\n * double(via_layer->getWidth())\n / double(unit_micron);\n if (rho <= 1e-12) {\n rho = 0;\n err_flag_layer = 0;\n }\n int x_loc1,x_loc2,y_loc1,y_loc2;\n if (layer_dir == dbTechLayerDir::Value::HORIZONTAL) {\n y_loc1 = y - 
y_cut_size/2;\n y_loc2 = y + y_cut_size/2;\n x_loc1 = x - (x_bottom_enclosure+x_cut_size/2);\n x_loc2 = x + (x_bottom_enclosure+x_cut_size/2);\n } else {\n y_loc1 = y - (y_bottom_enclosure+y_cut_size/2);\n y_loc2 = y + (y_bottom_enclosure+y_cut_size/2);\n x_loc1 = x - x_cut_size/2;\n x_loc2 = x + x_cut_size/2;\n }\n m_Gmat->GenerateStripeConductance(via_layer->getRoutingLevel(),\n layer_dir,\n x_loc1,\n x_loc2,\n y_loc1,\n y_loc2,\n rho);\n }\n via_layer = via->getTopLayer();\n layer_dir = via_layer->getDirection();\n l = via_layer->getRoutingLevel();\n if(l != m_top_layer) {\n double rho = via_layer->getResistance()\n * double(via_layer->getWidth())\n / double(unit_micron);\n if (rho <= 1e-12) {\n rho = 0;\n err_flag_layer = 0;\n }\n int x_loc1,x_loc2,y_loc1,y_loc2;\n if (layer_dir == dbTechLayerDir::Value::HORIZONTAL) {\n y_loc1 = y - y_cut_size/2;\n y_loc2 = y + y_cut_size/2;\n x_loc1 = x - (x_top_enclosure+x_cut_size/2);\n x_loc2 = x + (x_top_enclosure+x_cut_size/2);\n } else {\n y_loc1 = y - (y_top_enclosure+y_cut_size/2);\n y_loc2 = y + (y_top_enclosure+y_cut_size/2);\n x_loc1 = x - x_cut_size/2;\n x_loc2 = x + x_cut_size/2;\n }\n m_Gmat->GenerateStripeConductance(via_layer->getRoutingLevel(),\n layer_dir,\n x_loc1,\n x_loc2,\n y_loc1,\n y_loc2,\n rho);\n }\n\n\n } else {\n dbTechLayer* wire_layer = curWire->getTechLayer();\n int l = wire_layer->getRoutingLevel();\n double rho = wire_layer->getResistance()\n * double(wire_layer->getWidth())\n / double(unit_micron);\n if (rho <= 1e-12) {\n rho = 0;\n err_flag_layer = 0;\n }\n dbTechLayerDir::Value layer_dir = wire_layer->getDirection();\n if (l == m_bottom_layer){//ensure that the bootom layer(rail) is horizontal\n layer_dir = dbTechLayerDir::Value::HORIZONTAL;\n }\n int x_loc1 = curWire->xMin();\n int x_loc2 = curWire->xMax();\n int y_loc1 = curWire->yMin();\n int y_loc2 = curWire->yMax();\n if (l == m_bottom_layer || l == m_top_layer) { // special case for bottom and top layers we design a dense grid\n if 
(layer_dir == dbTechLayerDir::Value::HORIZONTAL ) {\n x_loc1 = (x_loc1/m_node_density)*m_node_density; //quantize the horizontal direction\n x_loc2 = (x_loc2/m_node_density)*m_node_density; //quantize the horizontal direction\n } else {\n y_loc1 = (y_loc1/m_node_density)*m_node_density; //quantize the vertical direction\n y_loc2 = (y_loc2/m_node_density)*m_node_density; //quantize the vertical direction\n }\n }\n m_Gmat->GenerateStripeConductance(wire_layer->getRoutingLevel(),\n layer_dir,\n x_loc1, \n x_loc2,\n y_loc1,\n y_loc2,\n rho);\n }\n }\n }\n }\n cout<>::iterator c4_node_it;\n int x,y;\n CscMatrix* Amat = m_Gmat->GetAMat();\n int num_nodes = m_Gmat->GetNumNodes();\n\n dbTech* tech = m_db->getTech();\n int unit_micron = tech->getDbUnitsPerMicron();\n\n for(c4_node_it = m_C4Nodes.begin(); c4_node_it != m_C4Nodes.end() ; c4_node_it++){\n Node* c4_node = m_Gmat->GetNode((*c4_node_it).first);\n std::queue node_q;\n node_q.push(c4_node);\n while(!node_q.empty()) {\n NodeIdx col_loc, n_col_loc;\n Node* node = node_q.front();\n node_q.pop();\n node->SetConnected();\n NodeIdx col_num = node->GetGLoc();\n col_loc = Amat->col_ptr[col_num];\n if(col_num < Amat->col_ptr.size()-1) {\n n_col_loc = Amat->col_ptr[col_num+1];\n } else {\n n_col_loc = Amat->row_idx.size() ;\n }\n std::vector col_vec(Amat->row_idx.begin()+col_loc,\n Amat->row_idx.begin()+n_col_loc);\n\n\n std::vector::iterator col_vec_it;\n for(col_vec_it = col_vec.begin(); col_vec_it != col_vec.end(); col_vec_it++){\n if(*col_vec_itGetNode(*col_vec_it);\n if(!(node_next->GetConnected())) {\n node_q.push(node_next);\n }\n }\n }\n }\n }\n int uncon_err_cnt = 0;\n int uncon_err_flag = 0;\n int uncon_inst_cnt = 0;\n int uncon_inst_flag = 0;\n std::vector node_list = m_Gmat->GetAllNodes();\n std::vector::iterator node_list_it;\n bool unconnected_node =false;\n for(node_list_it = node_list.begin(); node_list_it != node_list.end(); node_list_it++){\n if(!(*node_list_it)->GetConnected()){\n uncon_err_cnt++;\n 
NodeLoc node_loc = (*node_list_it)->GetLoc();\n float loc_x = ((float)node_loc.first)/((float)unit_micron);\n float loc_y = ((float)node_loc.second)/((float)unit_micron);\n\n //if(uncon_err_cnt>25 && uncon_err_flag ==0 ) {\n // uncon_err_flag =1;\n // cout<<\"Error display limit reached, suppressing further unconnected node error messages\"<GetLayerNum()<25 && uncon_inst_flag ==0 ) {\n // uncon_inst_flag =1;\n // cout<<\"Error display limit reached, suppressing further unconnected instance error messages\"<HasInstances()){\n std::vector insts = (*node_list_it)->GetInstances();\n std::vector::iterator inst_it;\n for(inst_it = insts.begin();inst_it!=insts.end();inst_it++) {\n uncon_inst_cnt++;\n cout<<\"Warning: Instance: \"<< (*inst_it)->getName() <<\"at location x:\"<GetLayerNum()<> IRSolver::GetPower()\n{\n PowerInst power_inst;\n vector> power_report = power_inst.executePowerPerInst(\n m_sta);\n\n return power_report;\n}\n\nbool IRSolver::GetResult(){\n return m_result; \n}\n\nint IRSolver::PrintSpice() {\n DokMatrix* Gmat = m_Gmat->GetGMatDOK();\n map::iterator it;\n \n ofstream pdnsim_spice_file;\n pdnsim_spice_file.open (m_spice_out_file);\n if (!pdnsim_spice_file.is_open()) {\n cout << \"File did not open\" << endl;\n return 0;\n }\n vector J = GetJ();\n int num_nodes = m_Gmat->GetNumNodes();\n int resistance_number = 0;\n int voltage_number = 0;\n int current_number = 0; \n\n NodeLoc node_loc;\n for(it = Gmat->values.begin(); it!= Gmat->values.end(); it++){\n NodeIdx col = (it->first).first;\n NodeIdx row = (it->first).second;\n if(col <= row) {\n continue; //ignore lower half and diagonal as matrix is symmetric\n }\n double cond = it->second; // get cond value\n if(abs(cond) < 1e-15){ //ignore if an empty cell\n continue;\n }\n\n string net_name = \"vdd\";\n if(col < num_nodes) { //resistances\n double resistance = -1/cond;\n\n Node* node1 = m_Gmat->GetNode(col); \n Node* node2 = m_Gmat->GetNode(row); \n node_loc = node1->GetLoc();\n int x1 = 
node_loc.first;\n int y1 = node_loc.second;\n int l1 = node1->GetLayerNum();\n string node1_name = net_name + \"_\" + to_string(x1) + \"_\" + to_string(y1) + \"_\" + to_string(l1);\n\n node_loc = node2->GetLoc();\n int x2 = node_loc.first;\n int y2 = node_loc.second;\n int l2 = node2->GetLayerNum();\n string node2_name = net_name + \"_\" + to_string(x2) + \"_\" + to_string(y2) + \"_\" + to_string(l2);\n \n string resistance_name = \"R\" + to_string(resistance_number); \n resistance_number++;\n\n pdnsim_spice_file<< resistance_name <<\" \"<< node1_name << \" \" << node2_name <<\" \"<< to_string(resistance) <GetCurrent();\n string current_name = \"I\" + to_string(current_number); \n if(abs(current)> 1e-18) {\n pdnsim_spice_file<< current_name <<\" \"<< node1_name << \" \" << 0 <<\" \"<< current <GetNode(row); //VDD location \n node_loc = node1->GetLoc();\n double voltage = J[col];\n int x1 = node_loc.first;\n int y1 = node_loc.second;\n int l1 = node1->GetLayerNum();\n string node1_name = net_name + \"_\" + to_string(x1) + \"_\" + to_string(y1) + \"_\" + to_string(l1);\n string voltage_name = \"V\" + to_string(voltage_number); \n voltage_number++;\n pdnsim_spice_file<< voltage_name <<\" \"<< node1_name << \" 0 \" << to_string(voltage) <GenerateCSCMatrix();\n res = m_Gmat->GenerateACSCMatrix();\n }\n if(res) {\n m_connection = CheckConnectivity();\n res = m_connection;\n }\n m_result = res;\n return m_result;\n}\n\nbool IRSolver::BuildConnection() {\n bool res = true;\n ReadC4Data();\n if(res) {\n res = CreateGmat(true); \n }\n if(res) {\n res = AddC4Bump();\n }\n if(res) {\n res = m_Gmat->GenerateACSCMatrix();\n }\n if(res) {\n m_connection = CheckConnectivity();\n res = m_connection;\n }\n m_result = res;\n return m_result;\n}\n \n", "meta": {"hexsha": "29db40c2bb38f7b762570273a03e1c770d4fadec", "size": 37016, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/PDNSim/src/ir_solver.cpp", "max_stars_repo_name": "tgingold/OpenROAD", "max_stars_repo_head_hexsha": 
"c37064854166551adb257ef8c4aa438f9cec5493", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-01-14T06:27:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-14T06:27:26.000Z", "max_issues_repo_path": "src/PDNSim/src/ir_solver.cpp", "max_issues_repo_name": "tgingold/OpenROAD", "max_issues_repo_head_hexsha": "c37064854166551adb257ef8c4aa438f9cec5493", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/PDNSim/src/ir_solver.cpp", "max_forks_repo_name": "tgingold/OpenROAD", "max_forks_repo_head_hexsha": "c37064854166551adb257ef8c4aa438f9cec5493", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-01-14T06:27:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-14T06:27:38.000Z", "avg_line_length": 36.685827552, "max_line_length": 140, "alphanum_fraction": 0.5620002161, "num_tokens": 9904, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8128673269042767, "lm_q2_score": 0.6150878555160665, "lm_q1q2_score": 0.4999848209246289}} {"text": "//==================================================================================================\n/*!\n @file\n\n @copyright 2016 NumScale SAS\n @copyright 2016 J.T. 
Lapreste\n\n Distributed under the Boost Software License, Version 1.0.\n (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)\n*/\n//==================================================================================================\n#ifndef BOOST_SIMD_FUNCTION_DOT_HPP_INCLUDED\n#define BOOST_SIMD_FUNCTION_DOT_HPP_INCLUDED\n\n#if defined(DOXYGEN_ONLY)\nnamespace boost { namespace simd\n{\n\n /*!\n\n @ingroup group-reduction\n Function object implementing dot capabilities\n\n returns the dot product of the two vector arguments\n\n @par Semantic:\n\n For every parameters of type T:\n\n @code\n scalar_of_t r = dot(x,y);\n @endcode\n\n is similar to:\n\n @code\n scalar_of_t r = sum(x*conj(y));\n @endcode\n\n **/\n const boost::dispatch::functor dot = {};\n} }\n#endif\n\n#include \n#include \n\n#endif\n", "meta": {"hexsha": "18ebfb9bb68cdee48ced1fb87c419b41fe004709", "size": 1087, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/simd/function/dot.hpp", "max_stars_repo_name": "yaeldarmon/boost.simd", "max_stars_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/simd/function/dot.hpp", "max_issues_repo_name": "yaeldarmon/boost.simd", "max_issues_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/simd/function/dot.hpp", "max_forks_repo_name": "yaeldarmon/boost.simd", "max_forks_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, 
"avg_line_length": 22.1836734694, "max_line_length": 100, "alphanum_fraction": 0.5722171113, "num_tokens": 236, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8128673133042217, "lm_q2_score": 0.6150878555160665, "lm_q1q2_score": 0.49998481255940025}} {"text": "//==============================================================================\n// Copyright 2003 - 2012 LASMEA UMR 6602 CNRS/Univ. Clermont II\n// Copyright 2009 - 2012 LRI UMR 8623 CNRS/Univ Paris Sud XI\n//\n// Distributed under the Boost Software License, Version 1.0.\n// See accompanying file LICENSE.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt\n//==============================================================================\n/*!\n * \\file\n**/\n#ifndef BOOST_SIMD_SWAR_FUNCTIONS_CUMTRAPZ_HPP_INCLUDED\n#define BOOST_SIMD_SWAR_FUNCTIONS_CUMTRAPZ_HPP_INCLUDED\n#include \n#include \n#include \n#include \n\n\n/*!\n * \\ingroup boost_simd_swar\n * \\defgroup boost_simd_swar_cumtrapz cumtrapz\n *\n * \\par Description\n * compute the cumulate trapz of the vector elements using the abscissae differences\n * is they are given\n * z = cumtrapz(y) computes an approximation of the cumulative\n * integral of y via the trapezoidal method (with unit spacing). to\n * compute the integral for spacing different from one, multiply z by\n * the spacing incrementor use cumtrapz(dx, y) where dx is the abscisae\n * constant and SCALAR increment.\n *\n * for vectors, cumtrapz(y) is a vector containing the cumulative\n * integral of y. for matrices, cumtrapz(y) is a matrix the same size as\n * x with the cumulative integral over each column. for n-d arrays,\n * cumtrapz(y) works along the first non-singleton dimension.\n *\n * z = cumtrapz(x,y) computes the cumulative integral of y with respect\n * to x using trapezoidal integration. x and y must be vectors of the\n * same length, or x must be a column vector and y an array whose first\n * non-singleton dimension is length(x). 
cumtrapz operates across this\n * dimension.\n * if x is scalar the increment is considered constant and of value x.\n * (A 1x1 matrix expression is not a scalar)\n *\n * z = cumtrapz(x,y,dim) or cumtrapz(y,dim) integrates along dimension\n * dim of y. the length of x must be the same as size(y,dim)).\n *\n * \\par Header file\n *\n * \\code\n * #include \n * \\endcode\n *\n *\n * \\synopsis\n *\n * \\code\n * namespace boost::simd\n * {\n * template \n * meta::call::type\n * cumtrapz(const A0 & x, const A1 & y, const A2 & dim);\n * }\n * \\endcode\n *\n *\n**/\n\nnamespace boost { namespace simd { namespace tag\n {\n /*!\n * \\brief Define the tag cumtrapz_ of functor cumtrapz\n * in namespace boost::simd::tag for toolbox boost.simd.swar\n **/\n struct cumtrapz_ : tag::formal_\n {\n typedef tag::formal_ parent;\n };\n }\n BOOST_DISPATCH_FUNCTION_IMPLEMENTATION(tag::cumtrapz_, cumtrapz, 1)\n BOOST_DISPATCH_FUNCTION_IMPLEMENTATION(tag::cumtrapz_, cumtrapz, 2)\n BOOST_DISPATCH_FUNCTION_IMPLEMENTATION(tag::cumtrapz_, cumtrapz, 3)\n} }\n\n#endif\n\n// modified by jt the 25/12/2010\n", "meta": {"hexsha": "1bf79349a2727faf110995842055bce26a164063", "size": 3054, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "modules/boost/simd/swar/include/boost/simd/swar/functions/cumtrapz.hpp", "max_stars_repo_name": "pbrunet/nt2", "max_stars_repo_head_hexsha": "2aeca0f6a315725b335efd5d9dc95d72e10a7fb7", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modules/boost/simd/swar/include/boost/simd/swar/functions/cumtrapz.hpp", "max_issues_repo_name": "pbrunet/nt2", "max_issues_repo_head_hexsha": "2aeca0f6a315725b335efd5d9dc95d72e10a7fb7", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": 
"modules/boost/simd/swar/include/boost/simd/swar/functions/cumtrapz.hpp", "max_forks_repo_name": "pbrunet/nt2", "max_forks_repo_head_hexsha": "2aeca0f6a315725b335efd5d9dc95d72e10a7fb7", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3146067416, "max_line_length": 84, "alphanum_fraction": 0.6640471513, "num_tokens": 806, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES\n\n", "lm_q1_score": 0.8128673087708699, "lm_q2_score": 0.6150878555160666, "lm_q1q2_score": 0.4999848097709907}} {"text": "//\n// Copyright (c) 2019-2020 INRIA\n//\n\n#include \"pinocchio/autodiff/casadi.hpp\"\n\n#include \"pinocchio/algorithm/rnea.hpp\"\n#include \"pinocchio/algorithm/rnea-derivatives.hpp\"\n#include \"pinocchio/algorithm/aba.hpp\"\n#include \"pinocchio/algorithm/aba-derivatives.hpp\"\n#include \"pinocchio/algorithm/joint-configuration.hpp\"\n\n#include \"pinocchio/parsers/sample-models.hpp\"\n\n#include \n\n#include \n#include \n\nBOOST_AUTO_TEST_SUITE(BOOST_TEST_MODULE)\n\nBOOST_AUTO_TEST_CASE(test_integrate)\n{\n typedef double Scalar;\n typedef casadi::SX ADScalar;\n \n typedef pinocchio::ModelTpl Model;\n typedef Model::Data Data;\n \n typedef pinocchio::ModelTpl ADModel;\n typedef ADModel::Data ADData;\n \n Model model;\n pinocchio::buildModels::humanoidRandom(model);\n model.lowerPositionLimit.head<3>().fill(-1.);\n model.upperPositionLimit.head<3>().fill(1.);\n Data data(model);\n \n typedef Model::ConfigVectorType ConfigVector;\n typedef Model::TangentVectorType TangentVector;\n ConfigVector q(model.nq);\n q = pinocchio::randomConfiguration(model);\n TangentVector v(TangentVector::Random(model.nv));\n TangentVector a(TangentVector::Random(model.nv));\n \n typedef ADModel::ConfigVectorType ConfigVectorAD;\n ADModel ad_model = model.cast();\n ADData ad_data(ad_model);\n \n pinocchio::rnea(model,data,q,v,a);\n \n casadi::SX cs_q = 
casadi::SX::sym(\"q\", model.nq);\n casadi::SX cs_v_int = casadi::SX::sym(\"v_inc\", model.nv);\n \n ConfigVectorAD q_ad(model.nq), v_int_ad(model.nv), q_int_ad(model.nq);\n q_ad = Eigen::Map(static_cast< std::vector >(cs_q).data(),model.nq,1);\n v_int_ad = Eigen::Map(static_cast< std::vector >(cs_v_int).data(),model.nv,1);\n \n pinocchio::integrate(ad_model,q_ad,v_int_ad,q_int_ad);\n casadi::SX cs_q_int(model.nq,1);\n pinocchio::casadi::copy(q_int_ad,cs_q_int);\n \n std::cout << \"cs_q_int:\" << cs_q_int << std::endl;\n casadi::Function eval_integrate(\"eval_integrate\",\n casadi::SXVector {cs_q,cs_v_int},\n casadi::SXVector {cs_q_int});\n std::vector q_vec((size_t)model.nq);\n Eigen::Map(q_vec.data(),model.nq,1) = q;\n \n std::vector v_int_vec((size_t)model.nv);\n Eigen::Map(v_int_vec.data(),model.nv,1).setZero();\n casadi::DM q_int_res = eval_integrate(casadi::DMVector {q_vec,v_int_vec})[0];\n \n Data::ConfigVectorType q_int_vec = Eigen::Map(static_cast< std::vector >(q_int_res).data(),model.nq,1);\n \n ConfigVector q_plus(model.nq);\n pinocchio::integrate(model,q,TangentVector::Zero(model.nv),q_plus);\n \n std::cout << \"q_int_vec: \" << q_int_vec.transpose() << std::endl;\n BOOST_CHECK(q_plus.isApprox(q_int_vec));\n}\n \nBOOST_AUTO_TEST_CASE(test_rnea_derivatives)\n{\n typedef double Scalar;\n typedef casadi::SX ADScalar;\n \n typedef pinocchio::ModelTpl Model;\n typedef Model::Data Data;\n \n typedef pinocchio::ModelTpl ADModel;\n typedef ADModel::Data ADData;\n \n Model model;\n pinocchio::buildModels::humanoidRandom(model);\n model.lowerPositionLimit.head<3>().fill(-1.);\n model.upperPositionLimit.head<3>().fill(1.);\n Data data(model);\n \n typedef Model::ConfigVectorType ConfigVector;\n typedef Model::TangentVectorType TangentVector;\n ConfigVector q(model.nq);\n q = pinocchio::randomConfiguration(model);\n TangentVector v(TangentVector::Random(model.nv));\n TangentVector a(TangentVector::Random(model.nv));\n \n typedef ADModel::ConfigVectorType 
ConfigVectorAD;\n typedef ADModel::TangentVectorType TangentVectorAD;\n ADModel ad_model = model.cast();\n ADData ad_data(ad_model);\n \n pinocchio::rnea(model,data,q,v,a);\n \n casadi::SX cs_q = casadi::SX::sym(\"q\", model.nq);\n casadi::SX cs_v_int = casadi::SX::sym(\"v_inc\", model.nv);\n \n ConfigVectorAD q_ad(model.nq), v_int_ad(model.nv), q_int_ad(model.nq);\n q_ad = Eigen::Map(static_cast< std::vector >(cs_q).data(),model.nq,1);\n v_int_ad = Eigen::Map(static_cast< std::vector >(cs_v_int).data(),model.nv,1);\n \n pinocchio::integrate(ad_model,q_ad,v_int_ad,q_int_ad);\n casadi::SX cs_q_int(model.nq,1);\n pinocchio::casadi::copy(q_int_ad,cs_q_int);\n std::vector q_vec((size_t)model.nq);\n Eigen::Map(q_vec.data(),model.nq,1) = q;\n \n std::vector v_int_vec((size_t)model.nv);\n Eigen::Map(v_int_vec.data(),model.nv,1).setZero();\n \n casadi::SX cs_v = casadi::SX::sym(\"v\", model.nv);\n TangentVectorAD v_ad(model.nv);\n v_ad = Eigen::Map(static_cast< std::vector >(cs_v).data(),model.nv,1);\n \n casadi::SX cs_a = casadi::SX::sym(\"a\", model.nv);\n TangentVectorAD a_ad(model.nv);\n a_ad = Eigen::Map(static_cast< std::vector >(cs_a).data(),model.nv,1);\n \n rnea(ad_model,ad_data,q_int_ad,v_ad,a_ad);\n casadi::SX cs_tau(model.nv,1);\n for(Eigen::DenseIndex k = 0; k < model.nv; ++k)\n {\n cs_tau(k) = ad_data.tau[k];\n }\n casadi::Function eval_rnea(\"eval_rnea\",\n casadi::SXVector {cs_q,cs_v_int, cs_v, cs_a},\n casadi::SXVector {cs_tau});\n \n std::vector v_vec((size_t)model.nv);\n Eigen::Map(v_vec.data(),model.nv,1) = v;\n \n std::vector a_vec((size_t)model.nv);\n Eigen::Map(a_vec.data(),model.nv,1) = a;\n \n // check return value\n casadi::DM tau_res = eval_rnea(casadi::DMVector {q_vec,v_int_vec,v_vec,a_vec})[0];\n std::cout << \"tau_res = \" << tau_res << std::endl;\n Data::TangentVectorType tau_vec = Eigen::Map(static_cast< std::vector >(tau_res).data(),model.nv,1);\n \n BOOST_CHECK(data.tau.isApprox(tau_vec));\n \n // compute references\n Data::MatrixXs 
dtau_dq_ref(model.nv,model.nv), dtau_dv_ref(model.nv,model.nv), dtau_da_ref(model.nv,model.nv);\n dtau_dq_ref.setZero(); dtau_dv_ref.setZero(); dtau_da_ref.setZero();\n \n pinocchio::computeRNEADerivatives(model,data,q,v,a,dtau_dq_ref,dtau_dv_ref,dtau_da_ref);\n dtau_da_ref.triangularView() = dtau_da_ref.transpose().triangularView();\n \n // check with respect to q+dq\n casadi::SX dtau_dq = jacobian(cs_tau, cs_v_int);\n casadi::Function eval_dtau_dq(\"eval_dtau_dq\",\n casadi::SXVector {cs_q,cs_v_int, cs_v, cs_a},\n casadi::SXVector {dtau_dq});\n \n casadi::DM dtau_dq_res = eval_dtau_dq(casadi::DMVector {q_vec,v_int_vec,v_vec,a_vec})[0];\n std::vector dtau_dq_vec(static_cast< std::vector >(dtau_dq_res));\n BOOST_CHECK(Eigen::Map(dtau_dq_vec.data(),model.nv,model.nv).isApprox(dtau_dq_ref));\n \n // check with respect to v+dv\n casadi::SX dtau_dv = jacobian(cs_tau, cs_v);\n casadi::Function eval_dtau_dv(\"eval_dtau_dv\",\n casadi::SXVector {cs_q,cs_v_int, cs_v, cs_a},\n casadi::SXVector {dtau_dv});\n \n casadi::DM dtau_dv_res = eval_dtau_dv(casadi::DMVector {q_vec,v_int_vec,v_vec,a_vec})[0];\n std::vector dtau_dv_vec(static_cast< std::vector >(dtau_dv_res));\n BOOST_CHECK(Eigen::Map(dtau_dv_vec.data(),model.nv,model.nv).isApprox(dtau_dv_ref));\n \n // check with respect to a+da\n casadi::SX dtau_da = jacobian(cs_tau, cs_a);\n casadi::Function eval_dtau_da(\"eval_dtau_da\",\n casadi::SXVector {cs_q,cs_v_int, cs_v, cs_a},\n casadi::SXVector {dtau_da});\n \n casadi::DM dtau_da_res = eval_dtau_da(casadi::DMVector {q_vec,v_int_vec,v_vec,a_vec})[0];\n std::vector dtau_da_vec(static_cast< std::vector >(dtau_da_res));\n BOOST_CHECK(Eigen::Map(dtau_da_vec.data(),model.nv,model.nv).isApprox(dtau_da_ref));\n \n // call RNEA derivatives in Casadi\n casadi::SX cs_dtau_dq(model.nv,model.nv);\n casadi::SX cs_dtau_dv(model.nv,model.nv);\n casadi::SX cs_dtau_da(model.nv,model.nv);\n \n computeRNEADerivatives(ad_model,ad_data,q_ad,v_ad,a_ad);\n ad_data.M.triangularView()\n = 
ad_data.M.transpose().triangularView();\n \n pinocchio::casadi::copy(ad_data.dtau_dq,cs_dtau_dq);\n pinocchio::casadi::copy(ad_data.dtau_dv,cs_dtau_dv);\n pinocchio::casadi::copy(ad_data.M,cs_dtau_da);\n \n casadi::Function eval_rnea_derivatives_dq(\"eval_rnea_derivatives_dq\",\n casadi::SXVector {cs_q, cs_v, cs_a},\n casadi::SXVector {cs_dtau_dq});\n \n casadi::DM dtau_dq_res_direct = eval_rnea_derivatives_dq(casadi::DMVector {q_vec,v_vec,a_vec})[0];\n Data::MatrixXs dtau_dq_res_direct_map = Eigen::Map(static_cast< std::vector >(dtau_dq_res_direct).data(),model.nv,model.nv);\n BOOST_CHECK(dtau_dq_ref.isApprox(dtau_dq_res_direct_map));\n \n casadi::Function eval_rnea_derivatives_dv(\"eval_rnea_derivatives_dv\",\n casadi::SXVector {cs_q, cs_v, cs_a},\n casadi::SXVector {cs_dtau_dv});\n \n casadi::DM dtau_dv_res_direct = eval_rnea_derivatives_dv(casadi::DMVector {q_vec,v_vec,a_vec})[0];\n Data::MatrixXs dtau_dv_res_direct_map = Eigen::Map(static_cast< std::vector >(dtau_dv_res_direct).data(),model.nv,model.nv);\n BOOST_CHECK(dtau_dv_ref.isApprox(dtau_dv_res_direct_map));\n \n casadi::Function eval_rnea_derivatives_da(\"eval_rnea_derivatives_da\",\n casadi::SXVector {cs_q, cs_v, cs_a},\n casadi::SXVector {cs_dtau_da});\n \n casadi::DM dtau_da_res_direct = eval_rnea_derivatives_da(casadi::DMVector {q_vec,v_vec,a_vec})[0];\n Data::MatrixXs dtau_da_res_direct_map = Eigen::Map(static_cast< std::vector >(dtau_da_res_direct).data(),model.nv,model.nv);\n BOOST_CHECK(dtau_da_ref.isApprox(dtau_da_res_direct_map));\n}\n \n BOOST_AUTO_TEST_CASE(test_aba)\n {\n typedef double Scalar;\n typedef casadi::SX ADScalar;\n\n typedef pinocchio::ModelTpl Model;\n typedef Model::Data Data;\n\n typedef pinocchio::ModelTpl ADModel;\n typedef ADModel::Data ADData;\n\n Model model;\n pinocchio::buildModels::humanoidRandom(model);\n model.lowerPositionLimit.head<3>().fill(-1.);\n model.upperPositionLimit.head<3>().fill(1.);\n Data data(model);\n\n typedef Model::ConfigVectorType ConfigVector;\n 
typedef Model::TangentVectorType TangentVector;\n ConfigVector q(model.nq);\n q = pinocchio::randomConfiguration(model);\n TangentVector v(TangentVector::Random(model.nv));\n TangentVector tau(TangentVector::Random(model.nv));\n\n typedef ADModel::ConfigVectorType ConfigVectorAD;\n typedef ADModel::TangentVectorType TangentVectorAD;\n ADModel ad_model = model.cast();\n ADData ad_data(ad_model);\n\n pinocchio::aba(model,data,q,v,tau);\n\n casadi::SX cs_q = casadi::SX::sym(\"q\", model.nq);\n casadi::SX cs_v_int = casadi::SX::sym(\"v_inc\", model.nv);\n ConfigVectorAD q_ad(model.nq), v_int_ad(model.nv), q_int_ad(model.nq);\n q_ad = Eigen::Map(static_cast< std::vector >(cs_q).data(),model.nq,1);\n v_int_ad = Eigen::Map(static_cast< std::vector >(cs_v_int).data(),model.nv,1);\n \n pinocchio::integrate(ad_model,q_ad,v_int_ad,q_int_ad);\n casadi::SX cs_q_int(model.nq,1);\n pinocchio::casadi::copy(q_int_ad,cs_q_int);\n std::vector q_vec((size_t)model.nq);\n Eigen::Map(q_vec.data(),model.nq,1) = q;\n \n std::vector v_int_vec((size_t)model.nv);\n Eigen::Map(v_int_vec.data(),model.nv,1).setZero();\n\n casadi::SX cs_v = casadi::SX::sym(\"v\", model.nv);\n TangentVectorAD v_ad(model.nv);\n v_ad = Eigen::Map(static_cast< std::vector >(cs_v).data(),model.nv,1);\n\n casadi::SX cs_tau = casadi::SX::sym(\"tau\", model.nv);\n TangentVectorAD tau_ad(model.nv);\n tau_ad = Eigen::Map(static_cast< std::vector >(cs_tau).data(),model.nv,1);\n\n // ABA\n aba(ad_model,ad_data,q_int_ad,v_ad,tau_ad);\n casadi::SX cs_ddq(model.nv,1);\n for(Eigen::DenseIndex k = 0; k < model.nv; ++k)\n cs_ddq(k) = ad_data.ddq[k];\n casadi::Function eval_aba(\"eval_aba\",\n casadi::SXVector {cs_q, cs_v_int, cs_v, cs_tau},\n casadi::SXVector {cs_ddq});\n\n std::vector v_vec((size_t)model.nv);\n Eigen::Map(v_vec.data(),model.nv,1) = v;\n\n std::vector tau_vec((size_t)model.nv);\n Eigen::Map(tau_vec.data(),model.nv,1) = tau;\n\n casadi::DM ddq_res = eval_aba(casadi::DMVector {q_vec, v_int_vec, v_vec, 
tau_vec})[0];\n Data::TangentVectorType ddq_mat = Eigen::Map(static_cast< std::vector >(ddq_res).data(),\n model.nv,1);\n\n BOOST_CHECK(ddq_mat.isApprox(data.ddq));\n \n // compute references\n Data::MatrixXs ddq_dq_ref(model.nv,model.nv), ddq_dv_ref(model.nv,model.nv), ddq_dtau_ref(model.nv,model.nv);\n ddq_dq_ref.setZero(); ddq_dv_ref.setZero(); ddq_dtau_ref.setZero();\n \n pinocchio::computeABADerivatives(model,data,q,v,tau,ddq_dq_ref,ddq_dv_ref,ddq_dtau_ref);\n ddq_dtau_ref.triangularView()\n = ddq_dtau_ref.transpose().triangularView();\n \n // check with respect to q+dq\n casadi::SX ddq_dq = jacobian(cs_ddq, cs_v_int);\n casadi::Function eval_ddq_dq(\"eval_ddq_dq\",\n casadi::SXVector {cs_q,cs_v_int,cs_v,cs_tau},\n casadi::SXVector {ddq_dq});\n \n casadi::DM ddq_dq_res = eval_ddq_dq(casadi::DMVector {q_vec,v_int_vec,v_vec,tau_vec})[0];\n std::vector ddq_dq_vec(static_cast< std::vector >(ddq_dq_res));\n BOOST_CHECK(Eigen::Map(ddq_dq_vec.data(),model.nv,model.nv).isApprox(ddq_dq_ref));\n \n // check with respect to v+dv\n casadi::SX ddq_dv = jacobian(cs_ddq, cs_v);\n casadi::Function eval_ddq_dv(\"eval_ddq_dv\",\n casadi::SXVector {cs_q,cs_v_int, cs_v, cs_tau},\n casadi::SXVector {ddq_dv});\n \n casadi::DM ddq_dv_res = eval_ddq_dv(casadi::DMVector {q_vec,v_int_vec,v_vec,tau_vec})[0];\n std::vector ddq_dv_vec(static_cast< std::vector >(ddq_dv_res));\n BOOST_CHECK(Eigen::Map(ddq_dv_vec.data(),model.nv,model.nv).isApprox(ddq_dv_ref));\n \n // check with respect to a+da\n casadi::SX ddq_dtau = jacobian(cs_ddq, cs_tau);\n casadi::Function eval_ddq_da(\"eval_ddq_da\",\n casadi::SXVector {cs_q,cs_v_int, cs_v, cs_tau},\n casadi::SXVector {ddq_dtau});\n \n casadi::DM ddq_dtau_res = eval_ddq_da(casadi::DMVector {q_vec,v_int_vec,v_vec,tau_vec})[0];\n std::vector ddq_dtau_vec(static_cast< std::vector >(ddq_dtau_res));\n BOOST_CHECK(Eigen::Map(ddq_dtau_vec.data(),model.nv,model.nv).isApprox(ddq_dtau_ref));\n \n // call ABA derivatives in Casadi\n casadi::SX 
cs_ddq_dq(model.nv,model.nv);\n casadi::SX cs_ddq_dv(model.nv,model.nv);\n casadi::SX cs_ddq_dtau(model.nv,model.nv);\n \n computeABADerivatives(ad_model,ad_data,q_ad,v_ad,tau_ad);\n ad_data.Minv.triangularView()\n = ad_data.Minv.transpose().triangularView();\n \n pinocchio::casadi::copy(ad_data.ddq_dq,cs_ddq_dq);\n pinocchio::casadi::copy(ad_data.ddq_dv,cs_ddq_dv);\n pinocchio::casadi::copy(ad_data.Minv,cs_ddq_dtau);\n \n casadi::Function eval_aba_derivatives_dq(\"eval_aba_derivatives_dq\",\n casadi::SXVector {cs_q, cs_v, cs_tau},\n casadi::SXVector {cs_ddq_dq});\n \n casadi::DM ddq_dq_res_direct = eval_aba_derivatives_dq(casadi::DMVector {q_vec,v_vec,tau_vec})[0];\n Data::MatrixXs ddq_dq_res_direct_map = Eigen::Map(static_cast< std::vector >(ddq_dq_res_direct).data(),model.nv,model.nv);\n BOOST_CHECK(ddq_dq_ref.isApprox(ddq_dq_res_direct_map));\n \n casadi::Function eval_aba_derivatives_dv(\"eval_aba_derivatives_dv\",\n casadi::SXVector {cs_q, cs_v, cs_tau},\n casadi::SXVector {cs_ddq_dv});\n \n casadi::DM ddq_dv_res_direct = eval_aba_derivatives_dv(casadi::DMVector {q_vec,v_vec,tau_vec})[0];\n Data::MatrixXs ddq_dv_res_direct_map = Eigen::Map(static_cast< std::vector >(ddq_dv_res_direct).data(),model.nv,model.nv);\n BOOST_CHECK(ddq_dv_ref.isApprox(ddq_dv_res_direct_map));\n \n casadi::Function eval_aba_derivatives_dtau(\"eval_aba_derivatives_dtau\",\n casadi::SXVector {cs_q, cs_v, cs_tau},\n casadi::SXVector {cs_ddq_dtau});\n \n casadi::DM ddq_dtau_res_direct = eval_aba_derivatives_dtau(casadi::DMVector {q_vec,v_vec,tau_vec})[0];\n Data::MatrixXs ddq_dtau_res_direct_map = Eigen::Map(static_cast< std::vector >(ddq_dtau_res_direct).data(),model.nv,model.nv);\n BOOST_CHECK(ddq_dtau_ref.isApprox(ddq_dtau_res_direct_map));\n }\n\nBOOST_AUTO_TEST_SUITE_END()\n", "meta": {"hexsha": "d3d98e358a659d3f98e31f7b6b286cd00525e146", "size": 17587, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "unittest/casadi-algo-derivatives.cpp", "max_stars_repo_name": 
"thanhndv212/pinocchio", "max_stars_repo_head_hexsha": "3b4d272bf4e8a231954b71201ee7e0963c944aef", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": 716.0, "max_stars_repo_stars_event_min_datetime": "2015-03-30T16:26:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:26:58.000Z", "max_issues_repo_path": "unittest/casadi-algo-derivatives.cpp", "max_issues_repo_name": "thanhndv212/pinocchio", "max_issues_repo_head_hexsha": "3b4d272bf4e8a231954b71201ee7e0963c944aef", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": 1130.0, "max_issues_repo_issues_event_min_datetime": "2015-02-21T17:30:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T09:06:22.000Z", "max_forks_repo_path": "unittest/casadi-algo-derivatives.cpp", "max_forks_repo_name": "thanhndv212/pinocchio", "max_forks_repo_head_hexsha": "3b4d272bf4e8a231954b71201ee7e0963c944aef", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": 239.0, "max_forks_repo_forks_event_min_datetime": "2015-02-05T14:15:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T23:51:47.000Z", "avg_line_length": 45.3273195876, "max_line_length": 154, "alphanum_fraction": 0.6791379997, "num_tokens": 5323, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8128672997041659, "lm_q2_score": 0.6150878555160665, "lm_q1q2_score": 0.49998480419417113}} {"text": "/* ---------------------------------------------------------------------\n *\n * Copyright (C) 2000 - 2020 by the deal.II authors\n *\n * This file is part of the deal.II library.\n *\n * The deal.II library is free software; you can use it, redistribute\n * it, and/or modify it under the terms of the GNU Lesser General\n * Public License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n * The full text of the license can be found in the file LICENSE.md at\n * the top level directory of deal.II.\n *\n * ---------------------------------------------------------------------\n\n *\n * Author: Wolfgang Bangerth, University of Heidelberg, 2000\n */\n\n\n\n#include \n#include \n\n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\n#include \n\nusing namespace dealii;\n\n\n\ntemplate \nclass Step6\n{\npublic:\n Step6();\n\n void\n run();\n\nprivate:\n void\n setup_system();\n void\n assemble_system();\n void\n solve();\n void\n refine_grid();\n void\n output_results(const unsigned int cycle) const;\n\n Triangulation triangulation;\n\n FE_Q fe_velocity;\n FE_DGP fe_pressure;\n FESystem fe;\n\n FEValuesExtractors::Vector velocity;\n FEValuesExtractors::Scalar pressure;\n\n DoFHandler dof_handler;\n\n AffineConstraints constraints;\n\n SparseMatrix system_matrix;\n SparsityPattern sparsity_pattern;\n\n Vector solution;\n Vector system_rhs;\n};\n\n\n\ntemplate \nStep6::Step6()\n : fe_velocity(2)\n , fe_pressure(1)\n , fe(fe_velocity, dim, fe_pressure, 1)\n , velocity(0)\n , pressure(dim)\n , dof_handler(triangulation)\n{}\n\n\n\ntemplate \nvoid\nStep6::setup_system()\n{\n 
dof_handler.distribute_dofs(fe);\n\n solution.reinit(dof_handler.n_dofs());\n system_rhs.reinit(dof_handler.n_dofs());\n\n constraints.clear();\n DoFTools::make_hanging_node_constraints(dof_handler, constraints);\n\n\n VectorTools::interpolate_boundary_values(dof_handler,\n 0,\n Functions::ZeroFunction(dim +\n 1),\n constraints,\n fe.component_mask(velocity));\n\n constraints.close();\n\n DynamicSparsityPattern dsp(dof_handler.n_dofs());\n DoFTools::make_sparsity_pattern(dof_handler,\n dsp,\n constraints,\n /*keep_constrained_dofs = */ false);\n\n sparsity_pattern.copy_from(dsp);\n\n system_matrix.reinit(sparsity_pattern);\n}\n\n\n\ntemplate \nvoid\nStep6::assemble_system()\n{\n const QGauss quadrature_formula(fe.degree + 1);\n\n FEValues fe_values(fe,\n quadrature_formula,\n update_values | update_gradients |\n update_quadrature_points | update_JxW_values);\n\n const unsigned int dofs_per_cell = fe.dofs_per_cell;\n\n FullMatrix cell_matrix(dofs_per_cell, dofs_per_cell);\n Vector cell_rhs(dofs_per_cell);\n\n std::vector local_dof_indices(dofs_per_cell);\n\n Functions::SymbolicFunction rhs_function(\n dim == 2 ?\n \"x^2*y^2*(x - 1)^2*(2*y - 2) + 2*x^2*y*(x - 1)^2*(y - 1)^2;-x^2*y^2*(2*x - 2)*(y - 1)^2 - 2*x*y^2*(x - 1)^2*(y - 1)^2; 0\" :\n \"x^2*y^2*(x - 1)^2*(2*y - 2) + 2*x^2*y*(x - 1)^2*(y - 1)^2;-x^2*y^2*(2*x - 2)*(y - 1)^2 - 2*x*y^2*(x - 1)^2*(y - 1)^2; 0; 0\");\n\n Vector rhs_values(dim + 1);\n for (const auto &cell : dof_handler.active_cell_iterators())\n {\n cell_matrix = 0;\n cell_rhs = 0;\n\n fe_values.reinit(cell);\n\n for (const unsigned int q_index : fe_values.quadrature_point_indices())\n {\n rhs_function.vector_value(fe_values.quadrature_point(q_index),\n rhs_values);\n for (const unsigned int i : fe_values.dof_indices())\n {\n auto v = fe_values[velocity].value(i, q_index);\n auto div_v = fe_values[velocity].divergence(i, q_index);\n auto grad_v = fe_values[velocity].gradient(i, q_index);\n auto q = fe_values[pressure].value(i, q_index);\n\n for 
(const unsigned int j : fe_values.dof_indices())\n {\n auto div_u = fe_values[velocity].divergence(j, q_index);\n auto grad_u = fe_values[velocity].gradient(j, q_index);\n auto p = fe_values[pressure].value(j, q_index);\n\n cell_matrix(i, j) +=\n (scalar_product(grad_u, grad_v) - div_v * p - div_u * q) *\n fe_values.JxW(q_index); // dx\n }\n for (unsigned int d = 0; d < dim; ++d)\n cell_rhs(i) += (1000 * rhs_values[d] * // f(x)\n v[d] * // phi_i(x_q)\n fe_values.JxW(q_index)); // dx\n }\n }\n\n cell->get_dof_indices(local_dof_indices);\n constraints.distribute_local_to_global(\n cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);\n }\n}\n\n\n\ntemplate \nvoid\nStep6::solve()\n{\n SparseDirectUMFPACK inverse;\n inverse.initialize(system_matrix);\n\n // SolverControl solver_control(1000, 1e-12);\n // SolverCG> solver(solver_control);\n\n // PreconditionSSOR> preconditioner;\n // preconditioner.initialize(system_matrix, 1.2);\n\n // solver.solve(system_matrix, solution, system_rhs, preconditioner);\n\n inverse.vmult(solution, system_rhs);\n constraints.distribute(solution);\n}\n\n\n\ntemplate \nvoid\nStep6::refine_grid()\n{\n Vector estimated_error_per_cell(triangulation.n_active_cells());\n\n KellyErrorEstimator::estimate(dof_handler,\n QGauss(fe.degree + 1),\n {},\n solution,\n estimated_error_per_cell,\n fe.component_mask(velocity));\n\n GridRefinement::refine_and_coarsen_fixed_number(triangulation,\n estimated_error_per_cell,\n 0.3,\n 0.03);\n\n triangulation.execute_coarsening_and_refinement();\n}\n\n\n\ntemplate \nvoid\nStep6::output_results(const unsigned int cycle) const\n{\n {\n std::vector solution_names(dim, \"velocity\");\n solution_names.emplace_back(\"pressure\");\n DataOutBase::VtkFlags flags;\n flags.write_higher_order_cells = true;\n\n std::vector\n data_component_interpretation(\n dim, DataComponentInterpretation::component_is_part_of_vector);\n\n data_component_interpretation.push_back(\n 
DataComponentInterpretation::component_is_scalar);\n DataOut data_out;\n data_out.set_flags(flags);\n\n data_out.attach_dof_handler(dof_handler);\n data_out.add_data_vector(solution,\n solution_names,\n DataOut::type_dof_data,\n data_component_interpretation);\n data_out.build_patches(fe.degree);\n\n std::ofstream output(\"solution-\" + std::to_string(cycle) + \".vtu\");\n data_out.write_vtu(output);\n }\n}\n\n\n\ntemplate \nvoid\nStep6::run()\n{\n for (unsigned int cycle = 0; cycle < 4; ++cycle)\n {\n std::cout << \"Cycle \" << cycle << ':' << std::endl;\n\n if (cycle == 0)\n {\n GridGenerator::hyper_cube(triangulation);\n triangulation.refine_global(4);\n }\n else\n refine_grid();\n\n\n std::cout << \" Number of active cells: \"\n << triangulation.n_active_cells() << std::endl;\n\n setup_system();\n\n std::cout << \" Number of degrees of freedom: \" << dof_handler.n_dofs()\n << std::endl;\n\n assemble_system();\n solve();\n output_results(cycle);\n }\n}\n\n\n\nint\nmain()\n{\n try\n {\n Step6<2> laplace_problem_2d;\n laplace_problem_2d.run();\n }\n catch (std::exception &exc)\n {\n std::cerr << std::endl\n << std::endl\n << \"----------------------------------------------------\"\n << std::endl;\n std::cerr << \"Exception on processing: \" << std::endl\n << exc.what() << std::endl\n << \"Aborting!\" << std::endl\n << \"----------------------------------------------------\"\n << std::endl;\n\n return 1;\n }\n catch (...)\n {\n std::cerr << std::endl\n << std::endl\n << \"----------------------------------------------------\"\n << std::endl;\n std::cerr << \"Unknown exception!\" << std::endl\n << \"Aborting!\" << std::endl\n << \"----------------------------------------------------\"\n << std::endl;\n return 1;\n }\n\n return 0;\n}\n", "meta": {"hexsha": "ecfd2481fd8d7d720fd9dfcef79d86e839a33d3b", "size": 10114, "ext": "cc", "lang": "C++", "max_stars_repo_path": "cpp/08_saddle_point_problems/stokes.cc", "max_stars_repo_name": "luca-heltai/advanced-fem", 
"max_stars_repo_head_hexsha": "7dc5416db07ee67410819e4f9680471548c6641a", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 7.0, "max_stars_repo_stars_event_min_datetime": "2019-03-13T22:07:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-12T07:59:37.000Z", "max_issues_repo_path": "cpp/08_saddle_point_problems/stokes.cc", "max_issues_repo_name": "luca-heltai/advanced-fem", "max_issues_repo_head_hexsha": "7dc5416db07ee67410819e4f9680471548c6641a", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cpp/08_saddle_point_problems/stokes.cc", "max_forks_repo_name": "luca-heltai/advanced-fem", "max_forks_repo_head_hexsha": "7dc5416db07ee67410819e4f9680471548c6641a", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8622589532, "max_line_length": 132, "alphanum_fraction": 0.5625865137, "num_tokens": 2403, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8031737963569016, "lm_q2_score": 0.6224593312018546, "lm_q1q2_score": 0.4999430241191715}} {"text": " #include \n #include \n #include \n #include \n #include \n #include \n #include \n\n using boost::spirit::rule;\n using boost::spirit::parser_tag;\n using boost::spirit::ch_p;\n using boost::spirit::real_p;\n\n using boost::spirit::tree_node;\n using boost::spirit::node_val_data;\n\n // The grammar\n struct parser: public boost::spirit::grammar\n {\n enum rule_ids { addsub_id, multdiv_id, value_id, real_id };\n\n struct set_value\n {\n set_value(parser const& p): self(p) {}\n void operator()(tree_node >& node,\n std::string::iterator begin,\n std::string::iterator end) const\n {\n node.value.value(self.tmp);\n }\n parser const& self;\n };\n\n mutable double tmp;\n\n template struct definition\n {\n rule > addsub;\n rule > multdiv;\n rule > value;\n rule > real;\n\n definition(parser const& self)\n {\n using namespace boost::spirit;\n addsub = multdiv\n >> *((root_node_d[ch_p('+')] | root_node_d[ch_p('-')]) >> multdiv);\n multdiv = value\n >> *((root_node_d[ch_p('*')] | root_node_d[ch_p('/')]) >> value);\n value = real | inner_node_d[('(' >> addsub >> ')')];\n real = leaf_node_d[access_node_d[real_p[assign_a(self.tmp)]][set_value(self)]];\n }\n\n rule > const& start() const\n {\n return addsub;\n }\n };\n };\n\n template\n double evaluate(TreeIter const& i)\n {\n double op1, op2;\n switch (i->value.id().to_long())\n {\n case parser::real_id:\n return i->value.value();\n case parser::value_id:\n case parser::addsub_id:\n case parser::multdiv_id:\n op1 = evaluate(i->children.begin());\n op2 = evaluate(i->children.begin()+1);\n switch(*i->value.begin())\n {\n case '+':\n return op1 + op2;\n case '-':\n return op1 - op2;\n case '*':\n return op1 * op2;\n case '/':\n return op1 / op2;\n default:\n assert(!\"Should not happen\");\n }\n default:\n assert(!\"Should not happen\");\n }\n return 0;\n }\n\n // the read/eval/write loop\n int main()\n {\n 
parser eval;\n std::string line;\n while (std::cout << \"Expression: \"\n && std::getline(std::cin, line)\n && !line.empty())\n {\n typedef boost::spirit::node_val_data_factory factory_t;\n boost::spirit::tree_parse_info info =\n boost::spirit::ast_parse(line.begin(), line.end(),\n eval, boost::spirit::space_p);\n if (info.full)\n {\n std::cout << \"Result: \" << evaluate(info.trees.begin()) << std::endl;\n }\n else\n {\n std::cout << \"Error in expression.\" << std::endl;\n }\n }\n };\n", "meta": {"hexsha": "6293355eda2e103c76cedbef8359592a87e0dad0", "size": 3065, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "lang/C++/arithmetic-evaluation.cpp", "max_stars_repo_name": "ethansaxenian/RosettaDecode", "max_stars_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2018-11-09T22:08:38.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-09T22:08:38.000Z", "max_issues_repo_path": "lang/C++/arithmetic-evaluation.cpp", "max_issues_repo_name": "ethansaxenian/RosettaDecode", "max_issues_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lang/C++/arithmetic-evaluation.cpp", "max_forks_repo_name": "ethansaxenian/RosettaDecode", "max_forks_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2018-11-09T22:08:40.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-09T22:08:40.000Z", "avg_line_length": 26.1965811966, "max_line_length": 86, "alphanum_fraction": 0.5768352365, "num_tokens": 782, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8031737963569016, "lm_q2_score": 0.6224593312018545, "lm_q1q2_score": 0.4999430241191714}} {"text": "// Software License for MTL\n// \n// Copyright (c) 2007 The Trustees of Indiana University.\n// 2008 Dresden University of Technology and the Trustees of Indiana University.\n// 2010 SimuNova UG (haftungsbeschr\u00e4nkt), www.simunova.com.\n// All rights reserved.\n// Authors: Peter Gottschling and Andrew Lumsdaine\n// \n// This file is part of the Matrix Template Library\n// \n// See also license.mtl.txt in the distribution.\n\n#include \n#include \n#include \n\n#include \n#include \n#include \n\n\ntemplate \nvoid test_accumulate(const char* name)\n{\n const int array_size= 10;\n Element array[array_size];\n for (int i= 0; i < array_size; i++) \n \tarray[i]= Element(i);\n\n std::list l;\n for (int i= 0; i < array_size; i++) \n \tl.push_back(Element(i));\n \n std::cout << '\\n' << name << '\\n' << \" Add: \";\n math::accumulate(&array[0], array+array_size, Element(0), math::add());\n std::cout << \"Mult: \";\n math::accumulate(array, array+array_size, Element(1), math::mult());\n std::cout << \"Mult [with a list]: \";\n math::accumulate(l.begin(), l.end(), Element(1), math::mult());\n std::cout << \" Min: \";\n math::accumulate(array, array+array_size, Element(1000), math::min());\n std::cout << \" Max: \";\n math::accumulate(array, array+array_size, Element(-1000), math::max());\n}\n\n\nint main(int, char* [])\n{\n test_accumulate(\"int\");\n test_accumulate(\"float\");\n test_accumulate(\"double\");\n std::cout << '\\n';\n\n return 0;\n}\n", "meta": {"hexsha": "0e30a4af0e573340d9a0f3c4cf5673ce56032711", "size": 1748, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/numeric/linear_algebra/test/accumulation_simple.cpp", "max_stars_repo_name": "lit-uriy/mtl4-mirror", "max_stars_repo_head_hexsha": "37cf7c2847165d3537cbc3400cb5fde6f80e3d8b", "max_stars_repo_licenses": ["MTLL"], "max_stars_count": 24.0, 
"max_stars_repo_stars_event_min_datetime": "2019-03-26T15:25:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T10:00:45.000Z", "max_issues_repo_path": "libs/numeric/linear_algebra/test/accumulation_simple.cpp", "max_issues_repo_name": "lit-uriy/mtl4-mirror", "max_issues_repo_head_hexsha": "37cf7c2847165d3537cbc3400cb5fde6f80e3d8b", "max_issues_repo_licenses": ["MTLL"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2020-04-17T12:35:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-03T15:46:25.000Z", "max_forks_repo_path": "libs/numeric/linear_algebra/test/accumulation_simple.cpp", "max_forks_repo_name": "lit-uriy/mtl4-mirror", "max_forks_repo_head_hexsha": "37cf7c2847165d3537cbc3400cb5fde6f80e3d8b", "max_forks_repo_licenses": ["MTLL"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2019-12-01T13:40:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T08:39:54.000Z", "avg_line_length": 31.2142857143, "max_line_length": 94, "alphanum_fraction": 0.6458810069, "num_tokens": 461, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7217432062975979, "lm_q2_score": 0.6926419894793248, "lm_q1q2_score": 0.49990965030315493}} {"text": "// Copyright (c) 2020 Sabar Nimmagadda. 
All rights reserved.\n\n#include \"matrix_app.h\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"CinderImGui.h\"\nusing Eigen::MatrixXd;\nnamespace matrixapp {\nusing cinder::app::KeyEvent;\nusing cinder::Color;\nusing cinder::ColorA;\nusing cinder::Rectf;\nusing cinder::TextBox;\nusing std::string;\nconst char kNormalFont[] = \"Arial\";\n\nMatrixApp::MatrixApp()\n : state_{AppState::kSelecting} {}\n\n\nvoid MatrixApp::setup() {\n ui::initialize();\n test_mat << 1, 2, 3,\n 4, 5, 6,\n 7, 8, 9;\n}\n\nvoid MatrixApp::update() {\n if (state_ == AppState::kInputtingData) {\n InputMatrix();\n String_To_Matrix();\n }\n}\n\nvoid MatrixApp::draw() {\n DrawBackground();\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 50};\n const Color color = Color::white();\n if (state_ == AppState::kSelecting) {\n PrintText(\"WELCOME\", color, size, {100, 50});\n CreateMenu();\n }\n if (state_ == AppState::kSolved) {\n DrawAnswer();\n }\n}\n\n\nvoid MatrixApp::CreateMenu() {\n ui::ScopedWindow window( \"Choose problem\", ImGuiWindowFlags_MenuBar );\n if( ui::BeginMenuBar() ){\n if( ui::BeginMenu( \"Problem Type\" )) {\n if (ui::MenuItem( \"RREF\" )) {\n problemType = ProblemType::RREF;\n state_= AppState::kInputtingData;\n }\n if (ui::MenuItem( \"Row Space\" )) {\n problemType = ProblemType::RowSpace;\n state_= AppState::kInputtingData;\n }\n if (ui::MenuItem( \"Column Space\")) {\n problemType = ProblemType::ColumnSpace;\n state_= AppState::kInputtingData;\n }\n if (ui::MenuItem(\"LU Decomposition\")) {\n problemType = ProblemType::LUDecomposition;\n state_= AppState::kInputtingData;\n }\n if (ui::MenuItem( \"Permutation Matrix\")) {\n problemType = ProblemType::PermutationMatrix;\n state_= AppState::kInputtingData;\n }\n if (ui::MenuItem( \"Inverse\")) {\n problemType = ProblemType::Inverse;\n state_= AppState::kInputtingData;\n }\n if (ui::MenuItem( \"Matrix 
Multiplication\")) {\n problemType = ProblemType::MatrixMultiplication;\n state_ = AppState::kInputtingData;\n }\n if (ui::MenuItem(\"QR Decomposition\")) {\n problemType = ProblemType::QRDecomposition;\n state_ = AppState::kInputtingData;\n }\n if (ui::MenuItem(\"Dot Product\")) {\n problemType = ProblemType::DotProduct;\n state_ = AppState::kInputtingData;\n }\n if (ui::MenuItem(\"Eigen Vectors\")) {\n problemType = ProblemType::EigenVectors;\n state_ = AppState::kInputtingData;\n }\n if (ui::MenuItem(\"Eigen Values\")) {\n problemType = ProblemType::EigenValues;\n state_ = AppState::kInputtingData;\n }\n if (ui::MenuItem(\"Determinant\")) {\n problemType = ProblemType::Determinant;\n state_ = AppState::kInputtingData;\n }\n ui::EndMenu();\n }\n ui::EndMenuBar();\n }\n const ImVec2 vec2(500, 500);\n ui::SetWindowSize(\"Choose problem\", vec2);\n}\n\n\nvoid MatrixApp::DrawAnswer() {\n if (problemType == ProblemType::QRDecomposition) {\n DrawQRAnswer(in_mat1);\n }\n if (problemType == ProblemType::LUDecomposition) {\n DrawLUAnswer(in_mat1);\n }\n if (problemType == ProblemType::PermutationMatrix) {\n DrawPermutationAnswer(in_mat1);\n }\n if (problemType == ProblemType::RREF) {\n DrawRREFAnswer(in_mat1);\n }\n if (problemType == ProblemType::EigenValues) {\n DrawEigenValuesAnswer(in_mat1);\n }\n if (problemType == ProblemType::Inverse) {\n DrawInverseAnswer(in_mat1);\n }\n if (problemType == ProblemType::EigenVectors) {\n DrawEigenVectorsAnswer(in_mat1);\n }\n if (problemType == ProblemType::DotProduct) {\n DrawDotProductAnswer(in_mat1, in_mat2);\n }\n if (problemType == ProblemType::MatrixMultiplication) {\n DrawMultiplicationAnswer(in_mat1, in_mat2);\n }\n if (problemType == ProblemType::Determinant) {\n DrawDeterminantAnswer(in_mat1);\n }\n if (problemType == ProblemType::RowSpace) {\n DrawRowSpaceAnswer(in_mat1);\n }\n if (problemType == ProblemType::ColumnSpace) {\n DrawColSpaceAnswer(in_mat1);\n }\n}\n\n\nvoid MatrixApp::keyDown(KeyEvent event) {\n}\n\nvoid 
MatrixApp::PrintText(const string& text, const Color color, const cinder::ivec2& size,\n const cinder::vec2& loc) {\n cinder::gl::color(color);\n auto box = TextBox()\n .alignment(TextBox::CENTER)\n .font(cinder::Font(kNormalFont, 30))\n .size(size)\n .color(color)\n .backgroundColor(ColorA(0, 0, 1, 0))\n .text(text);\n\n const auto box_size = box.getSize();\n const cinder::vec2 locp = {loc.x - box_size.x / 2, loc.y - box_size.y / 2};\n const auto surface = box.render();\n const auto texture = cinder::gl::Texture::create(surface);\n cinder::gl::draw(texture, locp);\n}\nvoid MatrixApp::DrawBackground() const {\n cinder::gl::clear(Color(0, 0, 0));\n}\nvoid MatrixApp::DrawLUAnswer(const MatrixXd& matrix) {\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 500};\n const Color color = Color::white();\n std::stringstream ss;\n ss << Computations::ComputeL(matrix);\n PrintText(\"Your L Matrix is\",color,{500,500},{center.x-50,center.y - 50});\n PrintText(ss.str(), color, size , center);\n std::stringstream st;\n st << Computations::ComputeU(matrix);\n PrintText(\"Your U Matrix is\",color,{500,500},{center.x-50,center.y + 150});\n PrintText(st.str(), color, size, {center.x, center.y + 200});\n BackToMenu();\n}\n\nvoid MatrixApp::DrawDotProductAnswer(MatrixXd matrix1, MatrixXd matrix2) {\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 500};\n const Color color = Color::white();\n std::stringstream ss;\n ss << Computations::ComputeDotProduct(std::move(matrix1), std::move(matrix2), kDimension);\n PrintText(\"Your Dot Product is\", color, {500,500},{center.x-50,center.y - 50});\n PrintText(ss.str(), color, size, center);\n BackToMenu();\n}\n\nvoid MatrixApp::DrawPermutationAnswer(MatrixXd matrix) {\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 500};\n const Color color = Color::white();\n std::stringstream ss;\n ss << 
Computations::ComputePermutationMatrix(std::move(matrix));\n PrintText(\"Your Permutation Matrix is\",color,{500,500},{center.x-50,center.y - 50});\n PrintText(ss.str(), color, size , center);\n BackToMenu();\n}\n\nvoid MatrixApp::DrawRREFAnswer(const MatrixXd& matrix) {\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 500};\n const Color color = Color::white();\n std::stringstream ss;\n ss << Computations::ComputeRREF(matrix);\n PrintText(\"Your Row Reduced Matrix is\",color,{500,500},{center.x-50,center.y - 50});\n PrintText(ss.str(), color, size , center);\n BackToMenu();\n}\n\nvoid MatrixApp::DrawMultiplicationAnswer(MatrixXd matrix1, MatrixXd matrix2) {\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 500};\n const Color color = Color::white();\n std::stringstream ss;\n ss << Computations::ComputeMultiply(std::move(matrix1), std::move(matrix2));\n PrintText(\"The product Matrix is\",color,{500,500},{center.x-50,center.y - 50});\n PrintText(ss.str(), color, size , center);\n BackToMenu();\n}\n\nvoid MatrixApp::DrawInverseAnswer(const MatrixXd& matrix) {\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 500};\n const Color color = Color::white();\n std::stringstream ss;\n ss << Computations::ComputeInverse(matrix);\n PrintText(\"The Inverse Matrix is\",color,{500,500},{center.x-50,center.y - 50});\n PrintText(ss.str(), color, size , center);\n BackToMenu();\n}\n\nvoid MatrixApp::DrawQRAnswer(const MatrixXd& matrix) {\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 500};\n const Color color = Color::white();\n std::stringstream ss;\n ss << Computations::ComputeQ(matrix);\n PrintText(\"Your Q Matrix is\",color,{500,500},{center.x-50,center.y - 50});\n PrintText(ss.str(), color, size , center);\n std::stringstream st;\n st << Computations::ComputeR(matrix);\n PrintText(\"Your R Matrix is\",color,{500,500},{center.x-50,center.y 
+ 150});\n PrintText(st.str(), color, size, {center.x, center.y + 200});\n BackToMenu();\n}\n\nvoid MatrixApp::DrawEigenVectorsAnswer(MatrixXd matrix) {\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 500};\n const Color color = Color::white();\n std::stringstream ss;\n ss << Computations::ComputeEigenVectors(matrix);\n PrintText(\"The Matrix of EigenVectors is\",color,{500,500},{center.x-50,center.y - 50});\n PrintText(ss.str(), color, size , center);\n BackToMenu();\n}\n\nvoid MatrixApp::DrawEigenValuesAnswer(MatrixXd matrix) {\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 500};\n const Color color = Color::white();\n std::stringstream ss;\n ss << Computations::ComputeEigenValues(matrix);\n PrintText(\"The Eigenvalues are\",color,{500,500},{center.x-50,center.y - 50});\n PrintText(ss.str(), color, size , center);\n BackToMenu();\n}\n\nvoid MatrixApp::DrawDeterminantAnswer(const MatrixXd& matrix) {\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 500};\n const Color color = Color::white();\n std::stringstream ss;\n ss << Computations::ComputeDeterminant(matrix);\n PrintText(\"Your determinant is\", color, {500,500},{center.x-50,center.y - 50});\n PrintText(ss.str(), color, size, center);\n BackToMenu();\n}\n\nvoid MatrixApp::DrawColSpaceAnswer(const MatrixXd& matrix) {\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 500};\n const Color color = Color::white();\n std::stringstream ss;\n ss << Computations::ComputeEigenVectors(matrix.inverse());\n PrintText(\"Your Column Space Matrix is\", color, {500,500},{center.x-50,center.y - 50});\n PrintText(ss.str(), color, size, center);\n BackToMenu();\n}\n\nvoid MatrixApp::DrawRowSpaceAnswer(const MatrixXd& matrix) {\n const cinder::vec2 center = getWindowCenter();\n const cinder::ivec2 size = {500, 500};\n const Color color = Color::white();\n std::stringstream ss;\n ss << 
Computations::ComputeEigenVectors(matrix);\n PrintText(\"Your Row Space Matrix is\", color, {500,500},{center.x-50,center.y - 50});\n PrintText(ss.str(), color, size, center);\n BackToMenu();\n}\n\nvoid MatrixApp::InputMatrix() {\n if (problemType != ProblemType::DotProduct && problemType != ProblemType::MatrixMultiplication) {\n ui::InputInt(\"Enter dimension\", &kDimension);\n ui::InputText(\"Input matrix\", &input_string);\n } else {\n ui::InputInt(\"Enter dimension\", &kDimension);\n ui::InputText(\"Input first matrix\", &input_string);\n ui::InputText(\"Input second matrix\", &input_string2);\n str_mat2 = input_string2;\n }\n str_mat = input_string;\n}\n\n\nvoid MatrixApp::String_To_Matrix() {\n int mat_size = kDimension *kDimension;\n if ( problemType != ProblemType::DotProduct\n && problemType != ProblemType::MatrixMultiplication\n && str_mat.size() == mat_size * 2 && mat_size != 0) { // <= size * 2\n //When the computation only needs one matrix.\n std::istringstream ss(str_mat);\n for (int r = 0; r < kDimension; r++) {\n for (int c = 0; c < kDimension; c++) {\n int elem;\n ss >> elem;\n in_mat1(r, c) = elem;\n }\n }\n state_ = AppState::kSolved;\n } else if (str_mat.size() == mat_size * 2 && str_mat2.size() == mat_size * 2 && mat_size != 0){\n //Made else if instead of else, because size cannot be zero ever.\n std::istringstream ss1(str_mat);\n std::istringstream ss2(str_mat2);\n for (int r = 0; r < kDimension; r++) {\n for (int c = 0; c < kDimension; c++) {\n int elem1;\n int elem2;\n ss1 >> elem1;\n ss2 >> elem2;\n in_mat1(r, c) = elem1;\n in_mat2(r, c) = elem2;\n }\n }\n state_ = AppState::kSolved;\n }\n}\n\nvoid MatrixApp::BackToMenu() {\n const cinder::ivec2 button_size = {500, 50};\n if (ui::Button(\"BACK TO MAIN MENU\", button_size)) {\n state_ = AppState::kSelecting;\n input_string = \"\";\n input_string2 = \"\";\n kDimension = 0;\n }\n}\n\n\n} // namespace myapp\n", "meta": {"hexsha": "8ff19c59250e095040efa88d5b0e0bc3d2ad92fa", "size": 13278, 
"ext": "cc", "lang": "C++", "max_stars_repo_path": "apps/matrix_app.cc", "max_stars_repo_name": "CS126SP20/Matrix-Machine-LinAlgComputer-SabarNimmagadda", "max_stars_repo_head_hexsha": "928af6c373363eabc7edadf63f3e3a7220d864f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "apps/matrix_app.cc", "max_issues_repo_name": "CS126SP20/Matrix-Machine-LinAlgComputer-SabarNimmagadda", "max_issues_repo_head_hexsha": "928af6c373363eabc7edadf63f3e3a7220d864f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "apps/matrix_app.cc", "max_forks_repo_name": "CS126SP20/Matrix-Machine-LinAlgComputer-SabarNimmagadda", "max_forks_repo_head_hexsha": "928af6c373363eabc7edadf63f3e3a7220d864f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7591623037, "max_line_length": 101, "alphanum_fraction": 0.6140231963, "num_tokens": 3424, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.6926419831347361, "lm_q2_score": 0.7217431943271999, "lm_q1q2_score": 0.499909637432791}} {"text": "//\n// OpenTissue, A toolbox for physical based simulation and animation.\n// Copyright (C) 2007 Department of Computer Science, University of Copenhagen\n//\n#include \n\n#include \n#include \n\n#define BOOST_AUTO_TEST_MAIN\n#include \n#include \n#include \n#include \n#include \n#include \n\nBOOST_AUTO_TEST_SUITE(opentissue_geometry_util_plane_box);\n\nBOOST_AUTO_TEST_CASE(case_by_case_testing)\n{\n\n typedef OpenTissue::math::BasicMathTypes math_types;\n typedef math_types::vector3_type vector3_type;\n typedef math_types::real_type real_type;\n\n typedef OpenTissue::geometry::PlaneBox plane_box_type; \n plane_box_type plane_box;\n\n real_type tol = 0.0001;\n\n vector3_type min_coord(0,0,0);\n vector3_type max_coord(1,1,1);\n\n plane_box.init(min_coord,max_coord);\n BOOST_CHECK( plane_box.box().min().is_equal( min_coord, tol ) );\n BOOST_CHECK( plane_box.box().max().is_equal( max_coord, tol ) );\n\n BOOST_CHECK( plane_box.n() == vector3_type(1,0,0) );\n BOOST_CHECK( plane_box.plane().n() == vector3_type(1,0,0) );\n\n plane_box.set_y_axis();\n BOOST_CHECK( plane_box.n() == vector3_type(0,1,0) );\n BOOST_CHECK( plane_box.plane().n() == vector3_type(0,1,0) );\n\n plane_box.set_z_axis();\n BOOST_CHECK( plane_box.n() == vector3_type(0,0,1) );\n BOOST_CHECK( plane_box.plane().n() == vector3_type(0,0,1) );\n\n plane_box.set_x_axis();\n BOOST_CHECK( plane_box.n() == vector3_type(1,0,0) );\n BOOST_CHECK( plane_box.plane().n() == vector3_type(1,0,0) );\n\n real_type w1 = plane_box.plane().w();\n plane_box.decrement();\n real_type w2 = plane_box.plane().w();\n BOOST_CHECK( w2 < w1 );\n plane_box.increment();\n real_type w3 = plane_box.plane().w();\n BOOST_CHECK( w3 > w2 );\n\n plane_box.set_x_axis();\n for(size_t i=0;i<500;++i)\n {\n plane_box.increment();\n BOOST_CHECK( plane_box.p0() <= max_coord );\n BOOST_CHECK( plane_box.p0() >= min_coord );\n 
BOOST_CHECK( plane_box.p1() <= max_coord );\n BOOST_CHECK( plane_box.p1() >= min_coord );\n BOOST_CHECK( plane_box.p2() <= max_coord );\n BOOST_CHECK( plane_box.p2() >= min_coord );\n BOOST_CHECK( plane_box.p3() <= max_coord );\n BOOST_CHECK( plane_box.p3() >= min_coord );\n }\n\n plane_box.set_y_axis();\n for(size_t i=0;i<500;++i)\n {\n plane_box.increment();\n BOOST_CHECK( plane_box.p0() <= max_coord );\n BOOST_CHECK( plane_box.p0() >= min_coord );\n BOOST_CHECK( plane_box.p1() <= max_coord );\n BOOST_CHECK( plane_box.p1() >= min_coord );\n BOOST_CHECK( plane_box.p2() <= max_coord );\n BOOST_CHECK( plane_box.p2() >= min_coord );\n BOOST_CHECK( plane_box.p3() <= max_coord );\n BOOST_CHECK( plane_box.p3() >= min_coord );\n }\n\n plane_box.set_z_axis();\n for(size_t i=0;i<500;++i)\n {\n plane_box.increment();\n BOOST_CHECK( plane_box.p0() <= max_coord );\n BOOST_CHECK( plane_box.p0() >= min_coord );\n BOOST_CHECK( plane_box.p1() <= max_coord );\n BOOST_CHECK( plane_box.p1() >= min_coord );\n BOOST_CHECK( plane_box.p2() <= max_coord );\n BOOST_CHECK( plane_box.p2() >= min_coord );\n BOOST_CHECK( plane_box.p3() <= max_coord );\n BOOST_CHECK( plane_box.p3() >= min_coord );\n }\n\n\n plane_box.set_x_axis();\n for(size_t i=0;i<500;++i)\n {\n plane_box.decrement();\n BOOST_CHECK( plane_box.p0() <= max_coord );\n BOOST_CHECK( plane_box.p0() >= min_coord );\n BOOST_CHECK( plane_box.p1() <= max_coord );\n BOOST_CHECK( plane_box.p1() >= min_coord );\n BOOST_CHECK( plane_box.p2() <= max_coord );\n BOOST_CHECK( plane_box.p2() >= min_coord );\n BOOST_CHECK( plane_box.p3() <= max_coord );\n BOOST_CHECK( plane_box.p3() >= min_coord );\n }\n\n plane_box.set_y_axis();\n for(size_t i=0;i<500;++i)\n {\n plane_box.decrement();\n BOOST_CHECK( plane_box.p0() <= max_coord );\n BOOST_CHECK( plane_box.p0() >= min_coord );\n BOOST_CHECK( plane_box.p1() <= max_coord );\n BOOST_CHECK( plane_box.p1() >= min_coord );\n BOOST_CHECK( plane_box.p2() <= max_coord );\n BOOST_CHECK( plane_box.p2() >= 
min_coord );\n BOOST_CHECK( plane_box.p3() <= max_coord );\n BOOST_CHECK( plane_box.p3() >= min_coord );\n }\n\n plane_box.set_z_axis();\n for(size_t i=0;i<500;++i)\n {\n plane_box.decrement();\n BOOST_CHECK( plane_box.p0() <= max_coord );\n BOOST_CHECK( plane_box.p0() >= min_coord );\n BOOST_CHECK( plane_box.p1() <= max_coord );\n BOOST_CHECK( plane_box.p1() >= min_coord );\n BOOST_CHECK( plane_box.p2() <= max_coord );\n BOOST_CHECK( plane_box.p2() >= min_coord );\n BOOST_CHECK( plane_box.p3() <= max_coord );\n BOOST_CHECK( plane_box.p3() >= min_coord );\n }\n\n}\n\nBOOST_AUTO_TEST_SUITE_END();\n", "meta": {"hexsha": "0cf33d1f4773d8565dc53bc0269a998098bacd8f", "size": 4964, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "unit_tests/core/geometry/plane_box/src/unit_plane_box.cpp", "max_stars_repo_name": "ricortiz/OpenTissue", "max_stars_repo_head_hexsha": "f8c8ebc5137325b77ba90bed897f6be2795bd6fb", "max_stars_repo_licenses": ["Zlib"], "max_stars_count": 76.0, "max_stars_repo_stars_event_min_datetime": "2018-02-20T11:30:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T12:45:06.000Z", "max_issues_repo_path": "unit_tests/core/geometry/plane_box/src/unit_plane_box.cpp", "max_issues_repo_name": "ricortiz/OpenTissue", "max_issues_repo_head_hexsha": "f8c8ebc5137325b77ba90bed897f6be2795bd6fb", "max_issues_repo_licenses": ["Zlib"], "max_issues_count": 27.0, "max_issues_repo_issues_event_min_datetime": "2018-11-20T14:32:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-24T15:26:45.000Z", "max_forks_repo_path": "unit_tests/core/geometry/plane_box/src/unit_plane_box.cpp", "max_forks_repo_name": "ricortiz/OpenTissue", "max_forks_repo_head_hexsha": "f8c8ebc5137325b77ba90bed897f6be2795bd6fb", "max_forks_repo_licenses": ["Zlib"], "max_forks_count": 24.0, "max_forks_repo_forks_event_min_datetime": "2018-02-21T01:45:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T07:06:49.000Z", "avg_line_length": 33.0933333333, "max_line_length": 
78, "alphanum_fraction": 0.6730459307, "num_tokens": 1380, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8175744850834649, "lm_q2_score": 0.6113819732941511, "lm_q1q2_score": 0.49985030200527825}} {"text": "#include \n#include \n#define BOOST_UBLAS_NO_ELEMENT_PROXIES\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"print.hpp\"\n#include \"random.hpp\"\n\nnamespace ublas=boost::numeric::ublas;\nnamespace blas=boost::numeric::bindings::blas;\n\nint main(int argc, char *argv[]) {\n {\n typedef ublas::vector vector;\n typedef ublas::matrix matrix;\n typedef ublas::triangular_matrix matrix_l;\n typedef ublas::triangular_matrix matrix_u;\n typedef typename vector::size_type size_type;\n rand_normal::reset();\n size_type n=8;\n matrix_l A_l(n, n);\n matrix_u A_u(n, n);\n for (size_type j=0; j::get();\n A_l(j, j)=A_u(j, j);\n for (size_type i=0; i::get();\n A_l(j, i)=A_u(i, j);\n }\n }\n vector x(n);\n for (size_type i=0; i::get();\n vector y(n);\n for (size_type i=0; i::get();\n double alpha(rand_normal::get());\n matrix P;\n {\n P=ublas::outer_prod(alpha*x, y);\n P+=ublas::outer_prod(y, alpha*x);\n for (size_type j=0; j\n\nCx4 VBC(Cx4 &maps, Log &log)\n{\n Index const nc = maps.dimension(0);\n Index const nx = maps.dimension(1);\n Index const ny = maps.dimension(2);\n Index const nz = maps.dimension(3);\n\n Eigen::Map mat(maps.data(), nc, nx * ny * nz);\n log.info(\"VBC SVD size {}x{}\", mat.rows(), mat.cols());\n auto const svd = mat.bdcSvd(Eigen::ComputeThinV);\n Cx4 body(maps.dimensions());\n Eigen::Map bodymat(body.data(), nc, nx * ny * nz);\n bodymat = svd.matrixV().transpose();\n log.image(maps, \"vbc-maps.nii\");\n log.image(body, \"vbc-body.nii\");\n bodymat = bodymat.array().conjugate() / bodymat.array().abs();\n return body;\n}\n\nvoid VCC(Cx4 &data, Log &log)\n{\n Index const nc = data.dimension(0);\n Index const nx = data.dimension(1);\n Index const ny = data.dimension(2);\n Index const nz = 
data.dimension(3);\n\n // Assemble our virtual conjugate channels\n Cx4 cdata(nc, nx, ny, nz);\n FFT::Planned<5, 3> fft(cdata, log);\n cdata = data;\n log.image(cdata, \"vcc-cdata.nii\");\n fft.forward(cdata);\n log.image(cdata, \"vcc-cdata-ks.nii\");\n Cx4 rdata = cdata.slice(Sz4{0, 1, 1, 1}, Sz4{nc, nx - 1, ny - 1, nz - 1})\n .reverse(Eigen::array({false, true, true, true}))\n .conjugate();\n cdata.setZero();\n cdata.slice(Sz4{0, 1, 1, 1}, Sz4{nc, nx - 1, ny - 1, nz - 1}) = rdata;\n log.image(cdata, \"vcc-cdata-conj-ks.nii\");\n fft.reverse(cdata);\n log.image(cdata, \"vcc-cdata-conj.nii\");\n\n Cx3 phase(nx, ny, nz);\n phase.setZero();\n for (Index iz = 1; iz < nz; iz++) {\n for (Index iy = 1; iy < ny; iy++) {\n for (Index ix = 1; ix < nx; ix++) {\n Cx1 const vals = data.chip(iz, 3).chip(iy, 2).chip(ix, 1);\n Cx1 const cvals = cdata.chip(iz, 3).chip(iy, 2).chip(ix, 1).conjugate(); // Dot has a conj\n float const p = std::log(Dot(cvals, vals)).imag() / 2.f;\n phase(ix, iy, iz) = std::polar(1.f, -p);\n }\n }\n }\n log.image(phase, \"vcc-correction.nii\");\n log.info(\"Applying Virtual Conjugate Coil phase correction\");\n data = data * Tile(phase, nc);\n log.image(data, \"vcc-corrected.nii\");\n}\n\nCx3 Hammond(Cx4 const &maps, Log &log)\n{\n Index const nc = maps.dimension(0);\n Index const nx = maps.dimension(1);\n Index const ny = maps.dimension(2);\n Index const nz = maps.dimension(3);\n log.info(\"Combining images via the Hammond method\");\n\n Index const refSz = 9;\n Cropper refCrop(Sz3{nx, ny, nz}, Sz3{refSz, refSz, refSz}, log);\n Cx1 const ref =\n refCrop.crop4(maps).sum(Sz3{1, 2, 3}).conjugate() / refCrop.crop4(maps).sum(Sz3{1, 2, 3}).abs();\n\n using FixedOne = Eigen::type2index<1>;\n Eigen::IndexList rsh;\n rsh.set(0, nc);\n Eigen::IndexList brd;\n brd.set(1, nx);\n brd.set(2, ny);\n brd.set(3, nz);\n auto const broadcasted = ref.reshape(rsh).broadcast(brd);\n Cx3 const combined = (maps * broadcasted).sum(Sz1{0});\n Cx3 const rss = 
maps.square().sum(Sz1{0}).sqrt();\n log.image(combined, \"hammond-combined.nii\");\n log.image(rss, \"hammond-rss.nii\");\n\n return combined;\n}", "meta": {"hexsha": "a988d63b51515423a068402b239bbd4ccbcafc24", "size": 3162, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/vc.cpp", "max_stars_repo_name": "spinicist/riesling", "max_stars_repo_head_hexsha": "fa98ef1380345aa47d57ba91c970f37fe8fc5405", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14.0, "max_stars_repo_stars_event_min_datetime": "2021-02-08T21:28:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T08:08:50.000Z", "max_issues_repo_path": "src/vc.cpp", "max_issues_repo_name": "spinicist/riesling", "max_issues_repo_head_hexsha": "fa98ef1380345aa47d57ba91c970f37fe8fc5405", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 29.0, "max_issues_repo_issues_event_min_datetime": "2021-02-19T11:59:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T20:45:57.000Z", "max_forks_repo_path": "src/vc.cpp", "max_forks_repo_name": "spinicist/riesling", "max_forks_repo_head_hexsha": "fa98ef1380345aa47d57ba91c970f37fe8fc5405", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2021-07-29T14:54:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T10:59:05.000Z", "avg_line_length": 32.9375, "max_line_length": 100, "alphanum_fraction": 0.623655914, "num_tokens": 1094, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8175744717487329, "lm_q2_score": 0.6113819732941511, "lm_q1q2_score": 0.4998502938526635}} {"text": "// Copyright 2008 Chung-Lin Wen.\n// Distributed under the Boost Software License, Version 1.0. 
(See\n// accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n/*************************************************************************************************/\n\n#ifndef GIL_LAB_H\n#define GIL_LAB_H\n\n////////////////////////////////////////////////////////////////////////////////////////\n/// \\file\n/// \\brief Support for CIE Lab color space\n/// \\author Chung-Lin Wen \\n\n////////////////////////////////////////////////////////////////////////////////////////\n\n#include \n#include \n\nnamespace boost { namespace gil {\n\n/// \\addtogroup ColorNameModel\n/// \\{\nnamespace lab_color_space\n{\n/// \\brief Luminance\nstruct luminance_t {}; \n/// \\brief a Color Component\nstruct a_color_opponent_t {};\n/// \\brief b Color Component\nstruct b_color_opponent_t {}; \n}\n/// \\}\n\n/// \\ingroup ColorSpaceModel\ntypedef mpl::vector3< lab_color_space::luminance_t\n , lab_color_space::a_color_opponent_t\n , lab_color_space::b_color_opponent_t\n > lab_t;\n\n/// \\ingroup LayoutModel\ntypedef layout lab_layout_t;\n\n\nGIL_DEFINE_ALL_TYPEDEFS( 32f, lab );\n\n/// \\ingroup ColorConvert\n/// \\brief RGB to LAB\ntemplate <>\nstruct default_color_converter_impl< rgb_t, lab_t >\n{\n template \n void operator()( const P1& src, P2& dst ) const\n {\n using namespace lab_color_space;\n\n // only bits32f for lab is supported\n bits32f temp_red = channel_convert( get_color( src, red_t() ));\n bits32f temp_green = channel_convert( get_color( src, green_t() ));\n bits32f temp_blue = channel_convert( get_color( src, blue_t() ));\n\n // first, transfer to xyz color space\n bits32f normalized_r = temp_red / 255.f;\n bits32f normalized_g = temp_green / 255.f;\n bits32f normalized_b = temp_blue / 255.f;\n\n if( normalized_r > 0.04045f )\n {\n\t \t normalized_r = pow( (( normalized_r + 0.055f ) / 1.055f ), 2.4f );\n }\n\t else\n\t {\n\t \t normalized_r /= 12.92f;\n }\n\n if( normalized_g > 0.04045f )\n {\n\t \t normalized_g = pow((( normalized_g + 
0.055f ) / 1.055f ), 2.4f );\n }\n\t else\n\t {\n\t \t normalized_g /= 12.92f;\n }\n\n if( normalized_b > 0.04045f )\n {\n\t \t normalized_b = pow( (( normalized_b + 0.055f ) / 1.055f ), 2.4f );\n }\n\t else\n\t {\n\t \t normalized_b /= 12.92f;\n }\n\n\t normalized_r *= 100.f;\n\t normalized_g *= 100.f;\n\t normalized_b *= 100.f;\n\n bits32f x, y, z;\n x = normalized_r * 0.4124f + normalized_g * 0.3576f + normalized_b * 0.1805f;\n y = normalized_r * 0.2126f + normalized_g * 0.7152f + normalized_b * 0.0722f;\n z = normalized_r * 0.0193f + normalized_g * 0.1192f + normalized_b * 0.9505f;\n\n // then, transfer to lab color space\n bits32f ref_x = 95.047f;\n bits32f ref_y = 100.000f;\n bits32f ref_z = 108.883f;\n bits32f normalized_x = x / ref_x;\n bits32f normalized_y = y / ref_y;\n bits32f normalized_z = z / ref_z;\n\n if( normalized_x > 0.008856f )\n {\n normalized_x = pow( normalized_x, 0.333f );\n }\n\t else\n\t {\n\t \t normalized_x = (7.787f * normalized_x) + ( 16.f/116.f );\n }\n\n if( normalized_y > 0.008856f )\n {\n\t \t normalized_y = pow( normalized_y, 0.333f );\n }\n\t else\n\t {\n\t \t normalized_y = (7.787f * normalized_y) + ( 16.f/116.f );\n }\n\n\t if( normalized_z > 0.008856f )\n\t {\n\t \t normalized_z = pow( normalized_z, 0.333f );\n }\n\t else\n\t {\n\t \t normalized_z = ( 7.787f * normalized_z ) + ( 16.f/116.f );\n }\n\n bits32f luminance, a_color_opponent, b_color_opponent;\n luminance = ( 116.f * normalized_y ) - 16.f;\n a_color_opponent = 500.f * ( normalized_x - normalized_y );\n b_color_opponent = 200.f * ( normalized_y - normalized_z );\n\n get_color( dst, luminance_t() ) = luminance;\n get_color( dst, a_color_opponent_t() ) = a_color_opponent;\n get_color( dst, b_color_opponent_t() ) = b_color_opponent;\n }\n};\n\n/// \\ingroup ColorConvert\n/// \\brief LAB to RGB\ntemplate <>\nstruct default_color_converter_impl\n{\n template \n void operator()( const P1& src, P2& dst) const\n {\n using namespace lab_color_space;\n\n bits32f luminance = 
get_color( src, luminance_t() );\n bits32f a_color_opponent = get_color( src, a_color_opponent_t() );\n bits32f b_color_opponent = get_color( src, b_color_opponent_t() );\n\n // first, transfer to xyz color space\n bits32f normalized_y = ( luminance + 16.f ) / 116.f;\n bits32f normalized_x = ( a_color_opponent / 500.f ) + normalized_y;\n bits32f normalized_z = normalized_y - ( b_color_opponent / 200.f );\n\n if( pow( normalized_y, 3.f ) > 0.008856f ) \n {\n normalized_y = pow( normalized_y, 3.f );\n }\n else\n {\n normalized_y = ( normalized_y - 16.f / 116.f ) / 7.787f;\n }\n\n if( pow( normalized_x, 3.f ) > 0.008856f ) \n {\n normalized_x = pow( normalized_x, 3.f );\n }\n else\n {\n normalized_x = ( normalized_x - 16.f / 116.f ) / 7.787f;\n }\n\n if( pow( normalized_z, 3.f ) > 0.008856f )\n {\n normalized_z = pow( normalized_z, 3.f );\n }\n else\n {\n normalized_z = ( normalized_z - 16.f / 116.f ) / 7.787f;\n }\n\n bits32f reference_x = 95.047f;\n bits32f reference_y = 100.000f;\n bits32f reference_z = 108.883f;\n bits32f x, y, z;\n x = reference_x * normalized_x;\n y = reference_y * normalized_y;\n z = reference_z * normalized_z;\n\n // then, transfer to rgb color space\n normalized_x = x / 100.f;\n normalized_y = y / 100.f;\n normalized_z = z / 100.f;\n\n bits32f result_r = normalized_x * 3.2406f + normalized_y * -1.5372f + normalized_z * -0.4986f;\n bits32f result_g = normalized_x * -0.9689f + normalized_y * 1.8758f + normalized_z * 0.0415f;\n bits32f result_b = normalized_x * 0.0557f + normalized_y * -0.2040f + normalized_z * 1.0570f;\n\n if( result_r > 0.0031308f )\n { \n result_r = 1.055f * pow( result_r, 1.f/2.4f ) - 0.055f;\n }\n else\n {\n result_r = 12.92f * result_r;\n }\n\n if( result_g > 0.0031308f ) \n {\n result_g = 1.055f * pow( result_g, 1.f/2.4f ) - 0.055f;\n }\n else\n {\n result_g = 12.92f * result_g;\n }\n\n if( result_b > 0.0031308f )\n {\n result_b = 1.055f * pow( result_b, 1.f/2.4f ) - 0.055f;\n }\n else\n {\n result_b = 12.92f * 
result_b;\n }\n\n bits32f red, green, blue;\n red = result_r * 255.f;\n green = result_g * 255.f;\n blue = result_b * 255.f;\n\n get_color(dst,red_t()) =\n channel_convert::type>( red );\n get_color(dst,green_t())=\n channel_convert::type>( green );\n get_color(dst,blue_t()) =\n channel_convert::type>( blue );\n }\n};\n\n} } // namespace boost::gil\n\n#endif // GIL_LAB_H\n", "meta": {"hexsha": "cc05960a8d8738150e74095e9823034c1325073c", "size": 7257, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "3rdparty/boost/boost/gil/extension/toolbox/lab.hpp", "max_stars_repo_name": "Greentwip/windy", "max_stars_repo_head_hexsha": "4eb8174f952c5b600ff004827a5c85dbfb013091", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2017-07-13T21:11:55.000Z", "max_stars_repo_stars_event_max_datetime": "2017-07-13T21:11:55.000Z", "max_issues_repo_path": "3rdparty/boost/boost/gil/extension/toolbox/lab.hpp", "max_issues_repo_name": "Greentwip/Windy", "max_issues_repo_head_hexsha": "4eb8174f952c5b600ff004827a5c85dbfb013091", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "3rdparty/boost/boost/gil/extension/toolbox/lab.hpp", "max_forks_repo_name": "Greentwip/Windy", "max_forks_repo_head_hexsha": "4eb8174f952c5b600ff004827a5c85dbfb013091", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.34765625, "max_line_length": 101, "alphanum_fraction": 0.569656883, "num_tokens": 2170, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8175744673038221, "lm_q2_score": 0.6113819732941511, "lm_q1q2_score": 0.4998502911351252}} {"text": "// Implementation for kc_test\n//\n\n#include \"stdafx.h\"\n#include \n#include \"funcs.h\"\n#include \"ck_sdk.h\"\n#include \"KCSdkUtilities.h\"\n#include \"SMask.h\"\n#include \"TestUtil.h\"\n#include \"splines.h\"\n#include \"SplineHelper.h\"\n#include \n#include \n#include \n#include \n\nconst double MIN_TOL = .00005;\nusing namespace Eigen;\nusing timer = std::chrono::steady_clock;\nusing std::chrono::time_point;\nusing std::chrono::duration_cast;\nusing std::chrono::milliseconds;\ntypedef Matrix Points;\nint TestSplineLibrary() {\n\tint status = CKNoError;\n\tCKPart part = CKGetActivePart();\n\tif (!part.IsValid()) {\n\t\treturn CK_NO_PART;\n\t}\n\tstd::vector control_points; // Holds the 4 control points for entire segment\n std::vector coeffs;\n CKSMatrix worldMat;\n CKSEntityArray curves = CurvesSelect(part);\n status = GetCoeffFromCurves(part, curves, coeffs, .0000001);\n CKSEntity spline = part.AddSpline(true, false, coeffs, NULL, &worldMat);\n //CKSEntity spline = SplineSelect(part);\n if (!spline.IsValid()) {\n\t\tMessageBox(nullptr, _T(\"Error with spline selection\"), _T(\"Spline Data\"), MB_OK_STOP);\n\t\tstatus = CKError;\n\t}\n\telse {\n\t\tbool is3D = false;\n\t\tbool isClosed = false;\n\n HPMatrix splineMatrix;\n\t\tCKSCoordArray nodePoints;\n\t\tCKSCoord startVector, endVector;\n\t\tpart.GetSpline(spline, NULL, coeffs, is3D, isClosed, NULL, &splineMatrix);\n\t\tpart.GetSpline(spline, NULL, nodePoints, &startVector, &endVector, is3D, isClosed, NULL, &splineMatrix);\n\t\tsize_t blockSize = 0;\n\t\tif (is3D) {\n\t\t\tblockSize = 12;\n\t\t}\n\t\telse {\n\t\t\tblockSize = 8;\n\t\t}\n\t\tdouble param[3][4] = { 0.0 }; // Array to hold current segment coefficients\n\t\tfor (size_t i = 0; i < coeffs.size(); i += blockSize) {\n\t\t\tif (is3D) {\n\t\t\t\tfor (size_t j = 0; j < 3; ++j) {\n\t\t\t\t\tfor (size_t k = 0; k < 4; ++k) 
{\n\t\t\t\t\t\tparam[j][k] = coeffs[i + (j * 4) + k];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\telse { // 2D Spline\n\t\t\t\tfor (size_t j = 0; j < 2; ++j) {\n\t\t\t\t\tfor (size_t k = 0; k < 4; ++k) {\n\t\t\t\t\t\tparam[j][k] = coeffs[i + (j * 4) + k];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tGetSplineControlPoints(param, control_points);\n\t\t}\n\n\t\t//// Test old normal function using 2nd derivative\n\t\t//CKSCoord old_normal;\n\t\t//CKSCoord old_tangent;\n\t\t//CKSCoord old_position;\n\n\t\t//CKSMath::Evaluate(spline, NULL, false, false, false, 1, .0625, &splineMatrix, &old_position,\n\t\t//\t&old_normal, &old_tangent);\n\t\t////GetSplineNormal(coeffs, 1, .5, old_normal);\n\t\t//if (old_normal.Magnitude() > .001) {\n\t\t//\told_normal.Normalize();\n\t\t//}\n\t\t//if (old_tangent.Magnitude() > .001) {\n\t\t//\told_tangent.Normalize();\n\t\t//}\n\n\t\t////GetSplineCoord(coeffs, 1, .5, old_position);\n\t\t//old_normal = old_position + old_normal;\n\t\t//old_tangent = old_position + old_tangent;\n\t\t//part.AddPoint(old_position);\n\t\t//part.AddPoint(old_normal);\n\t\t//part.AddPoint(old_tangent);\n\t\t//part.NoteState();\n\n\t\t//// Test 2D spline points\n\t\t//CKSCoord2D cp_2d;\n\t\t//std::vector points_2d;\n\t\t//for (auto i : control_points) {\n\t\t//\tcp_2d.m_dX = i.m_dX;\n\t\t//\tcp_2d.m_dY = i.m_dY;\n\t\t//\tpoints_2d.push_back(cp_2d);\n\t\t//}\n\t\tconst size_t degree = 3;\n\n\t\t//// Test elevate degree 2d. Works\n\t\t//std::vector elevated_points = bezier::ElevateDegree(points_2d, bezier::Dimension::k2d, degree);\n\t\t//for (auto i: elevated_points) {\n\t\t//\tpart.AddPoint(i.m_dX, i.m_dY, 0.0);\n\t\t//}\n\t\t//part.NoteState();\n\n\t\t//// Test segment split 2d. 
Works.\n\t\t//std::vector split_points;\n\t\t//for (size_t i = 0; i < points_2d.size() / (degree + 1); ++i) {\n\t\t//\tsplit_points = bezier::SplitSegment(points_2d, .5, i, degree, 2);\n\t\t//\tfor (auto j : split_points) {\n\t\t//\t\tpart.AddPoint(j.m_dX, j.m_dY, 0.0);\n\t\t//\t}\n\t\t//}\n\t\t//part.NoteState();\n\n\t\t//// Create spline from calculated coefficients 2d\n\t\t//for (size_t i = 0; i < 2; ++i) {\n\t\t//\tstd::vector new_coeff = bezier::GetCoefficients(split_points, i, degree, bezier::k2d);\n\t\t//\tpart.AddSpline(false, false, kc_coeff);\n\t\t//}\n\t\t//part.NoteState();\n\n\t\t//// Test tangent and normal functions 2d. Works\n\t\t//CKSCoord2D coordinate;\n\t\t//CKSCoord2D tangent;\n\t\t//CKSCoord2D normal;\n\t\t//for (size_t i = 0; i < points_2d.size() / 4; ++i) {\n // for (size_t j = 0; j < 4; ++j) {\n // double t = j / 4.0;\n // coordinate = bezier::GetPosition(points_2d, t, i, degree, bezier::k2d);\n // part.AddPoint(coordinate.m_dX, coordinate.m_dY, 0.0);\n // tangent = bezier::GetFirstDerivative(points_2d, t, i, degree, bezier::k2d);\n // normal = bezier::GetNormal(points_2d, t, i, degree, bezier::k2d);\n // CKSMatrix temp;\n // CKSCoord v1(coordinate.m_dX, coordinate.m_dY, 0.0);\n // CKSCoord v2(tangent.m_dX, tangent.m_dY, 0.0);\n // CKSCoord v3(normal.m_dX, normal.m_dY, 0.0);\n // CKSMath::MatrixVector(v1, v1 + v2, temp);\n // CKEntityAttrib attrib;\n // attrib.m_ucColorNumber = 7;\n // part.AddVector(.25, &temp, &attrib);\n // attrib.m_ucColorNumber = 10;\n // CKSMath::MatrixVector(v1, v1 + v3, temp);\n // part.AddVector(.25, &temp, &attrib);\n // }\n\t\t//}\n\t\t//part.NoteState();\n\n\t\t//// Test elevate degree 3d. Works\n\t\t//CKSCoordArray elevated_points = bezier::ElevateDegree(control_points, bezier::Dimension::k3d, 3);\n\t\t//for (size_t i = 0; i < elevated_points.size(); ++i) {\n\t\t//\tpart.AddPoint(elevated_points[i]);\n\t\t//}\n\t\t//part.NoteState();\n\n\t\t// Test segment split 3d. 
Works.\n\t\t//const size_t degree = 3;\n\t\t//std::vector split_points;\n\t\t//for (size_t i = 0; i < control_points.size() / (degree + 1); ++i) {\n\t\t//\tsplit_points = bezier::SplitSegment(control_points, .5, i, degree, 3);\n\t\t//\tfor (auto j : split_points) {\n\t\t//\t\tpart.AddPoint(j);\n\t\t//\t}\n\t\t//}\n\t\t//part.NoteState();\n\n\t\t// Create spline from calculated coefficients 3d. Works\n\t\t//for (size_t i = 0; i < 2; ++i) {\n\t\t//\tstd::vector new_coeff = bezier::GetCoefficients(split_points, i);\n\t\t//\tpart.AddSpline(true, false, kc_coeff);\n\t\t//}\n\t\t//part.NoteState();\n\n\t\t//// Test tangent and normal functions 3d. Works\n\t\t//CKSCoord coordinate;\n\t\t//CKSCoord tangent;\n\t\t//CKSCoord normal;\n\t\t//CKSCoord curvature;\n\t\t//for (size_t i = 0; i < control_points.size() / 4; ++i) {\n\t\t//\tfor (auto j = 0; j < 5; ++j) {\n\t\t//\t\tdouble t = j / 4.0;\n\t\t//\t\tcoordinate = bezier::GetPosition(control_points, t, i, 3, bezier::k3d);\n\t\t//\t\tpart.AddPoint(coordinate);\n\t\t//\t\ttangent = bezier::GetFirstDerivative(control_points, t, i, 3, bezier::k3d);\n\t\t//\t\tnormal = bezier::GetNormal(control_points, t, i, 3, bezier::k3d);\n\t\t//\t\tcurvature = bezier::GetSecondDerivative(control_points, t, i, 3, bezier::k3d);\n\t\t//\t\ttangent.Normalize();\n\t\t//\t\tnormal.Normalize();\n\t\t//\t\tcurvature.Normalize();\n\t\t//\t\tCKSMatrix temp;\n\t\t//\t\tCKSMath::MatrixVector(coordinate, coordinate + tangent, temp);\n\t\t//\t\tCKEntityAttrib attrib;\n\t\t//\t\tattrib.m_ucColorNumber = 7;\n\t\t//\t\tpart.AddVector(1.0, &temp, &attrib);\n\t\t//\t\tattrib.m_ucColorNumber = 10;\n\t\t//\t\tCKSMath::MatrixVector(coordinate, coordinate + normal, temp);\n\t\t//\t\tpart.AddVector(1.0, &temp, &attrib);\n\t\t//\t\t//attrib.m_ucColorNumber = 2;\n\t\t//\t\t//CKSMath::MatrixVector(coordinate, coordinate + curvature, temp);\n\t\t//\t\t//part.AddVector(1.0, &temp, &attrib);\n\t\t//\t}\n\t\t//}\n //part.DeleteEntity(spline);\n //part.NoteState();\n\n 
coeffs.clear();\n coeffs = bezier::GetCoefficients(control_points);\n size_t coeffs_size = coeffs.size();\n size_t control_point_set = degree + 1;\n size_t segment_size = control_point_set * 3;\n size_t segment_count = coeffs_size / segment_size;\n std::vector quad_coeffs(3);\n std::vector linear_coeffs(2);\n std::vector t_values;\n for (size_t i = 0; i < segment_count; ++i) {\n for (size_t j = 0; j < 3; ++j) {\n for (size_t k = 0; k < 3; ++k) {\n quad_coeffs[k] = coeffs[(i * segment_size) + (j * 4) + k] * (3 - k);\n if (k < 2) {\n linear_coeffs[k] = quad_coeffs[k] * (2 - k);\n }\n }\n bezier::SolveQuadratic(quad_coeffs, t_values);\n t_values.push_back(bezier::SolveLinear(linear_coeffs));\n }\n }\n //auto t_end = std::remove_if(t_values.begin(), t_values.end(), [](double a) {\n // return (a <= 0.0 || a >= 1.0);\n //});\n //t_values.erase(t_end, t_values.end());\n t_values.push_back(0.0);\n t_values.push_back(1.0);\n for (auto it : t_values) {\n if (it >= 0.0 && it <= 1.0) {\n CKSCoord position = bezier::GetPosition(control_points, it);\n part.AddPoint(position);\n }\n }\n part.NoteState();\n WriteData(\"nodes.dat\", nodePoints);\n\t\tWriteCoefficients(\"coeff.dat\", coeffs);\n WriteCoefficients(\"derivatives.dat\", t_values);\n\t\tWriteControlPoints(\"ctrl.dat\", control_points);\n\t\tCKSCoordArray fitPoints;\n\t\tSplineToPoints(part, spline, fitPoints);\n\t\tWriteData(\"spline.dat\", fitPoints);\n\t\tfitPoints.clear();\n\t}\n\treturn status;\n}\n\nint SplineHelix()\n{\n CWnd* pWnd = AfxGetMainWnd();\n int status = CKNoError;\n CKPart part = CKGetActivePart();\n if (!part.IsValid())\n {\n return CK_NO_PART;\n }\n\n CKSMatrix worldMat;\n CKSMask mask;\n mask.AddEntity(CKMaskLine);\n mask.AddEntity(CKMaskArc);\n mask.AddEntity(CKMaskSpline);\n mask.AddEntity(CKMaskNURBSpline);\n mask.AddEntity(CKMaskPolyline);\n mask.AddEntity(CKMaskEllipse);\n mask.AddEntity(CKMaskParabola);\n mask.AddEntity(CKMaskHyperbola);\n\n // Test create helical spline along a 3D curve\n 
Events keyCheck;\n double diameter = 1.0;\n double pitch = 0.25;\n CRegistry reg;\n if (reg.KeyExists(_T(\"Software\\\\HPM\\\\HPMTools\\\\SplineHelix\")))\n {\n reg.SetKey(_T(\"Software\\\\HPM\\\\HPMTools\\\\SplineHelix\"), FALSE);\n diameter = reg.ReadFloat(_T(\"Diameter\"), 1.0);\n pitch = reg.ReadFloat(_T(\"Pitch\"), .25);\n }\n else\n {\n reg.CreateKey(_T(\"Software\\\\HPM\\\\HPMTools\\\\SplineHelix\"));\n reg.SetKey(_T(\"Software\\\\HPM\\\\HPMTools\\\\SplineHelix\"), FALSE);\n reg.WriteFloat(_T(\"Diameter\"), 1.0);\n reg.WriteFloat(_T(\"Pitch\"), .25);\n }\n int step = 0;\n while (true)\n {\n switch (step)\n {\n case 0:\n {\n keyCheck = ck_get_input(_T(\"Enter diameter: \"), _T(\"\"), diameter, true, 0, CKS::GreaterThan, 0.0);\n switch (keyCheck)\n {\n case CKBackup:\n case CKEscape:\n return keyCheck;\n case CKNoError:\n {\n reg.WriteFloat(_T(\"Diameter\"), diameter);\n step++;\n break;\n }\n default:\n return keyCheck;\n }\n }\n case 1:\n {\n keyCheck = ck_get_input(_T(\"Enter pitch: \"), _T(\"\"), pitch);\n switch (keyCheck)\n {\n case CKBackup:\n {\n step--;\n continue;\n }\n case CKEscape:\n return keyCheck;\n case CKNoError:\n {\n reg.WriteFloat(_T(\"Pitch\"), pitch);\n step++;\n break;\n }\n default:\n return keyCheck;\n }\n if (CKSMath::CompareToZero(pitch, .01) <= 0)\n {\n pWnd->MessageBox(_T(\"The pitch entered is too small\"), MB_TITLE, MB_OK_INFO);\n step--;\n continue;\n }\n }\n case 2:\n {\n CKSEntityArray driveCurves;\n status = part.GenSel(_T(\"Select the sweep path chain of curves\"), driveCurves);\n switch (status)\n {\n case CKNoError:\n break;\n case CKBackup:\n {\n step--;\n continue;\n }\n default:\n if ((status < CKMenu1) || (status >= CKEscape))\n return status;\n //case CKEscape:\n //case CK_NO_PART:\n // return status;\n }\n CKSCoordArray helixPnts;\n CKSCoord startVec, endVec;\n status = GetHelicalSplinePoints(part, driveCurves, helixPnts, startVec, endVec, diameter, pitch, .0001);\n if (helixPnts.size())\n {\n std::ofstream 
out_file(\"helix.points\");\n for (auto p : helixPnts) {\n out_file << p.m_dX << '\\t' << p.m_dY << '\\t' << p.m_dZ << '\\n';\n }\n time_point start_time;\n time_point end_time;\n milliseconds elapsed;\n CString eigen_time;\n CString kc_time;\n CKSEntity helicalSpline;\n CKEntityAttrib attrib;\n\n start_time = timer::now();\n size_t point_count = helixPnts.size();\n Points points(3, point_count);\n for (size_t i = 0; i < point_count; ++i) {\n points(0, i) = helixPnts[i][0];\n points(1, i) = helixPnts[i][1];\n points(2, i) = helixPnts[i][2];\n }\n Spline3d testspline = SplineFitting::Interpolate(points, 3);\n size_t knots_size = testspline.knots().size();\n std::vector knots(knots_size);\n for (size_t j = 0; j < knots_size; ++j) {\n knots[j] = testspline.knots().data()[j];\n }\n Matrix control_points = testspline.ctrls();\n size_t ctrl_size = control_points.cols();\n std::vector ctrl_points(ctrl_size);\n for (size_t j = 0; j < ctrl_size; ++j) {\n ctrl_points[j].m_dX = control_points(0, j);\n ctrl_points[j].m_dY = control_points(1, j);\n ctrl_points[j].m_dZ = control_points(2, j);\n }\n attrib.m_ucColorNumber = 7;\n std::vector weights(ctrl_size, 1.0);\n helicalSpline = part.AddNURBSpline(3, true, false, knots, ctrl_points, weights, &attrib);\n end_time = timer::now();\n elapsed = duration_cast(end_time - start_time);\n eigen_time.Format(_T(\"Eigen Spline creation time: %d ms\\n\"), elapsed.count());\n part.NoteState();\n helicalSpline = part.AddSpline(true, false, true, true, startVec, endVec, helixPnts, NULL, &worldMat);\n part.NoteState();\n //helicalSpline = part.AddSpline(true, false, false, false, startVec, endVec, helixPnts, NULL, &worldMat);\n //start_time = timer::now();\n //attrib.m_ucColorNumber = 9;\n //helicalSpline = part.AddNURBSpline(3, true, false, helixPnts, &attrib, &worldMat);\n //end_time = timer::now();\n //elapsed = duration_cast(end_time - start_time);\n //kc_time.Format(_T(\"KC spline time: %d ms\\n\"), elapsed.count());\n 
//part.NoteState();\n if (!helicalSpline.IsValid()) {\n pWnd->MessageBox(_T(\"Error creating helical spline\"), MB_TITLE, MB_OK_STOP);\n return CKError;\n }\n CString all_time = eigen_time + kc_time;\n //CString all_time = eigen_time;\n pWnd->MessageBox(all_time, MB_TITLE, MB_OK_STOP);\n }\n }\n }\n }\n return status;\n}\n", "meta": {"hexsha": "b7dd9a3cd41335ab4ab028698d9d01ea268535a0", "size": 14428, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "kc_test/kc_testFuncs.cpp", "max_stars_repo_name": "hpmachining/splines", "max_stars_repo_head_hexsha": "9df0e51eac3169f0f518159752719f3b0fbfdb9c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-07-22T15:29:18.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-04T21:31:33.000Z", "max_issues_repo_path": "kc_test/kc_testFuncs.cpp", "max_issues_repo_name": "hpmachining/splines", "max_issues_repo_head_hexsha": "9df0e51eac3169f0f518159752719f3b0fbfdb9c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kc_test/kc_testFuncs.cpp", "max_forks_repo_name": "hpmachining/splines", "max_forks_repo_head_hexsha": "9df0e51eac3169f0f518159752719f3b0fbfdb9c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6425339367, "max_line_length": 119, "alphanum_fraction": 0.6120044358, "num_tokens": 4539, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.798186787341014, "lm_q2_score": 0.6261241772283034, "lm_q1q2_score": 0.49976404549839515}} {"text": "/**\nThis file is part of Deformable Shape Tracking (DEST).\n\nCopyright(C) 2015/2016 Christoph Heindl\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license.See the LICENSE file for details.\n*/\n\n#define CATCH_CONFIG_MAIN\n#include \"catch.hpp\"\n\n#include \n#include \n\nTEST_CASE(\"similarity-transform-translate\")\n{\n dest::core::Shape to(2, 4);\n to << 0.f, 2.f, 2.f, 0.f,\n 0.f, 0.f, 2.f, 2.f;\n \n Eigen::AffineCompact2f t;\n t = Eigen::Translation2f(1.f, 1.f);\n \n dest::core::Shape from = t.matrix() * to.colwise().homogeneous();\n \n Eigen::AffineCompact3f s = dest::core::estimateSimilarityTransform(from, to);\n \n Eigen::AffineCompact2f expected;\n expected = Eigen::Translation2f(-1.f, -1.f);\n\n REQUIRE(s.isApprox(expected));\n\n}\n\nTEST_CASE(\"similarity-transform-compound\")\n{\n dest::core::Shape to(2, 4);\n to << 0.f, 2.f, 2.f, 0.f,\n 0.f, 0.f, 2.f, 2.f;\n \n Eigen::AffineCompact2f t;\n t = Eigen::Translation2f(1.f, 1.f) * Eigen::Rotation2Df(0.17f) * Eigen::Scaling(1.8f);\n \n dest::core::Shape from = t.matrix() * to.colwise().homogeneous();\n \n Eigen::AffineCompact3f s = dest::core::estimateSimilarityTransform(from, to);\n \n Eigen::AffineCompact2f expected = t.inverse();\n \n REQUIRE(s.isApprox(expected)); \n}\n\n\nTEST_CASE(\"similarity-transform-between-rects\")\n{\n dest::core::Rect r = dest::core::createRectangle(Eigen::Vector2f(-2.f, -2.f), Eigen::Vector2f(2.f, 2.f));\n\n Eigen::AffineCompact2f t;\n t = Eigen::Rotation2Df(0.17f);\n\n r = t.matrix() * r.colwise().homogeneous();\n\n dest::core::Rect n = dest::core::unitRectangle();\n Eigen::AffineCompact3f s = dest::core::estimateSimilarityTransform(r, n);\n\n r = s.matrix() * r.colwise().homogeneous();\n REQUIRE(r.isApprox(n));\n}", "meta": {"hexsha": "bf96b53dc6068cb1be60dbdd16370326b31bf247", "size": 1860, "ext": 
"cpp", "lang": "C++", "max_stars_repo_path": "tests/test_transform.cpp", "max_stars_repo_name": "wangqianyun001/Facial-Landmark-Detection", "max_stars_repo_head_hexsha": "b8e2bc6b210ad2adea35f17fa8a8e58ac695e8f5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_transform.cpp", "max_issues_repo_name": "wangqianyun001/Facial-Landmark-Detection", "max_issues_repo_head_hexsha": "b8e2bc6b210ad2adea35f17fa8a8e58ac695e8f5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_transform.cpp", "max_forks_repo_name": "wangqianyun001/Facial-Landmark-Detection", "max_forks_repo_head_hexsha": "b8e2bc6b210ad2adea35f17fa8a8e58ac695e8f5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5714285714, "max_line_length": 109, "alphanum_fraction": 0.6456989247, "num_tokens": 571, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7981867777396211, "lm_q2_score": 0.6261241702517975, "lm_q1q2_score": 0.49976403391817614}} {"text": "#include \n\n#include \n#include \n#include \n\nusing std::vector;\n\nnamespace BlitzML {\n\nvalue_t LassoSolver::compute_dual_obj() const { \n value_t loss = 0.5 * l2_norm_sq(x);\n return -(loss + l1_penalty * l1_norm(omega));\n}\n\n\nvalue_t LassoSolver::compute_primal_obj_x() const {\n value_t ip = inner_product(data->b_values(), &x[0], num_examples);\n return 0.5 * sq(kappa_x) * l2_norm_sq(x) + kappa_x * ip;\n}\n\n\nvalue_t LassoSolver::compute_primal_obj_y() const {\n value_t ip = inner_product(data->b_values(), &y[0], num_examples);\n return 0.5 * l2_norm_sq(y) + ip;\n}\n\n\nvalue_t LassoSolver::update_coordinates_in_working_set() {\n value_t ret = 0.;\n for (const_index_itr i = ws.begin(); i != ws.end(); ++i) {\n value_t subgrad = update_feature_lasso(*i);\n ret += sq(subgrad);\n }\n ws.shuffle();\n return ret;\n}\n\n\ninline value_t LassoSolver::update_feature_lasso(index_t i) {\n value_t inv_L = inv_lipschitz_cache[i];\n if (inv_L < 0) {\n return 0.;\n }\n\n const Column& col = *A_cols[i];\n value_t current_value = omega[i];\n value_t grad = col.inner_product(x) \n + num_examples * Delta_bias * col_means_cache[i];\n if (current_value == 0. && fabs(grad) < l1_penalty) {\n return 0.;\n }\n\n value_t pre_shrink = current_value - grad * inv_L;\n value_t new_value = soft_threshold(pre_shrink, l1_penalty * inv_L);\n value_t delta = new_value - current_value;\n if (delta == 0.) 
{\n return 0.;\n }\n col.add_multiple(x, delta);\n omega[i] = new_value;\n if (use_bias) {\n Delta_bias -= col_means_cache[i] * delta;\n }\n\n return grad + sign(current_value) * l1_penalty;\n}\n\n\nvoid LassoSolver::update_bias(int max_newton_itr) {\n if (!use_bias) {\n return;\n }\n value_t grad = sum_vector(x);\n value_t delta = -grad / x.size();\n bias += delta;\n add_scalar_to_vector(x, delta);\n} \n\n\nvoid LassoSolver::perform_backtracking() {\n if (use_bias) {\n add_scalar_to_vector(x, Delta_bias);\n bias += Delta_bias;\n }\n}\n\n\nvoid LassoSolver::setup_proximal_newton_problem() { \n Delta_bias = 0.;\n}\n\n}\n\n", "meta": {"hexsha": "cdb91c5fa68fd1a3f0a4103abd6e01e52663781e", "size": 2158, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/sparse_linear/lasso_solver.cpp", "max_stars_repo_name": "tbjohns/BlitzML", "max_stars_repo_head_hexsha": "0523743e1ae3614bfe3f16aa226d7a27fab2d623", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2015-06-16T05:17:17.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-02T05:50:01.000Z", "max_issues_repo_path": "src/sparse_linear/lasso_solver.cpp", "max_issues_repo_name": "tbjohns/BlitzML", "max_issues_repo_head_hexsha": "0523743e1ae3614bfe3f16aa226d7a27fab2d623", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2018-05-13T13:53:58.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-11T14:53:26.000Z", "max_forks_repo_path": "src/sparse_linear/lasso_solver.cpp", "max_forks_repo_name": "tbjohns/BlitzML", "max_forks_repo_head_hexsha": "0523743e1ae3614bfe3f16aa226d7a27fab2d623", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2018-08-02T05:50:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-21T04:44:15.000Z", "avg_line_length": 22.7157894737, "max_line_length": 75, "alphanum_fraction": 
0.6742354032, "num_tokens": 635, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.798186768138228, "lm_q2_score": 0.6261241772283034, "lm_q1q2_score": 0.49976403347506654}} {"text": "#include \n", "meta": {"hexsha": "217ea655eb1c41a824000769f365fba4460ad3b5", "size": 55, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_math_distributions_hypergeometric.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_math_distributions_hypergeometric.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_math_distributions_hypergeometric.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 27.5, "max_line_length": 54, "alphanum_fraction": 0.8363636364, "num_tokens": 12, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8596637648915617, "lm_q2_score": 0.5813030906443134, "lm_q1q2_score": 0.4997252034463912}} {"text": "\r\n\r\n#include \r\nUSING_BIO_NS\r\n\r\n#include \r\n#include \r\n#include \r\n#undef max\r\n#include \r\n#include \r\nusing namespace boost;\r\nusing namespace boost::assign;\r\nusing boost::unit_test::test_suite;\r\n\r\n#include \r\nusing namespace std;\r\n\r\n#include \n\r\n#include \"blas1c.h\"\n#include \"lapackc.h\"\n#include \"arlsmat.h\"\n//#include \"arcomp.h\"\n#include \"arlsmat.h\"\n#include \"arlnsmat.h\"\n#include \"arlssym.h\"\n#include \"arlgsym.h\"\n#include \"arlsnsym.h\"\n#include \"arlgnsym.h\"\n#include \"arlscomp.h\"\n#include \"arlgcomp.h\"\n\r\n\r\n\r\n#define VERBOSE_CHECKING\r\n\r\n\n\ntemplate \nint AREig(arcomplex EigVal[], int n, int nnz, arcomplex A[],\n int irow[], int pcol[], int nev, char* which = \"LM\", int ncv = 0,\n FLOAT tol = 0.0, int maxit = 0, arcomplex* resid = 0,\n bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluNonSymMatrix > matrix(n, nnz, A, irow, pcol);\n\n // Defining the eigenvalue problem.\n\n ARluCompStdEig prob(nev, matrix, which, ncv, tol,\n maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigVal);\n\n} // complex standard problem, only eigenvalues, regular mode.\n\n\ntemplate \nint AREig(arcomplex EigVal[], arcomplex EigVec[], int n,\n int nnz, arcomplex A[], int irow[], int pcol[],\n int nev, char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, arcomplex* resid = 0, bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluNonSymMatrix > matrix(n, nnz, A, irow, pcol);\n\n // Defining the eigenvalue problem.\n\n ARluCompStdEig prob(nev, matrix, which, ncv, tol,\n maxit, resid, AutoShift);\n\n // Finding eigenvalues and eigenvectors.\n\n return prob.EigenValVectors(EigVec, EigVal);\n\n} // complex standard problem, values and vectors, regular mode.\n\n\ntemplate \nint AREig(arcomplex 
EigVal[], int n, int nnz, arcomplex A[],\n int irow[], int pcol[], arcomplex sigma, int nev,\n char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0, int maxit = 0,\n arcomplex* resid = 0, bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluNonSymMatrix > matrix(n, nnz, A, irow, pcol);\n\n // Defining the eigenvalue problem.\n\n ARluCompStdEig prob(nev, matrix, sigma, which, ncv,\n tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigVal);\n\n} // complex standard problem, only eigenvalues, shift-and-invert.\n\n\ntemplate \nint AREig(arcomplex EigVal[], arcomplex EigVec[], int n,\n int nnz, arcomplex A[], int irow[], int pcol[],\n arcomplex sigma, int nev, char* which = \"LM\",\n int ncv = 0, FLOAT tol = 0.0, int maxit = 0,\n arcomplex* resid = 0, bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluNonSymMatrix > matrix(n, nnz, A, irow, pcol);\n\n // Defining the eigenvalue problem.\n\n ARluCompStdEig prob(nev, matrix, sigma, which, ncv,\n tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues and eigenvectors.\n\n return prob.EigenValVectors(EigVec, EigVal);\n\n} // complex standard problem, values and vectors, shift-and-invert.\n\n\ntemplate \nint AREig(arcomplex EigVal[], int n, int nnzA,\n arcomplex A[], int irowA[], int pcolA[], int nnzB,\n arcomplex B[], int irowB[], int pcolB[], int nev,\n char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0, int maxit = 0,\n arcomplex* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluNonSymMatrix > matrixA(n, nnzA, A, irowA, pcolA);\n ARluNonSymMatrix > matrixB(n, nnzB, B, irowB, pcolB);\n\n // Defining the eigenvalue problem.\n\n ARluCompGenEig prob(nev, matrixA, matrixB, which,\n ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigVal);\n\n} // complex generalized problem, only eigenvalues, regular mode.\n\n\ntemplate \nint AREig(arcomplex 
EigVal[], arcomplex EigVec[], int n,\n int nnzA, arcomplex A[], int irowA[], int pcolA[],\n int nnzB, arcomplex B[], int irowB[], int pcolB[],\n int nev, char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, arcomplex* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluNonSymMatrix > matrixA(n, nnzA, A, irowA, pcolA);\n ARluNonSymMatrix > matrixB(n, nnzB, B, irowB, pcolB);\n\n // Defining the eigenvalue problem.\n\n ARluCompGenEig prob(nev, matrixA, matrixB, which,\n ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues and eigenvectors.\n\n return prob.EigenValVectors(EigVec, EigVal);\n\n} // complex generalized problem, values and vectors, regular mode.\n\n\ntemplate \nint AREig(arcomplex EigVal[], int n, int nnzA, arcomplex A[],\n int irowA[], int pcolA[], int nnzB, arcomplex B[],\n int irowB[], int pcolB[], arcomplex sigma, int nev,\n char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0, int maxit = 0,\n arcomplex* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluNonSymMatrix > matrixA(n, nnzA, A, irowA, pcolA);\n ARluNonSymMatrix > matrixB(n, nnzB, B, irowB, pcolB);\n\n // Defining the eigenvalue problem.\n\n ARluCompGenEig prob(nev, matrixA, matrixB, sigma, which,\n ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigVal);\n\n} // complex generalized problem, only eigenvalues, shift-and-invert mode.\n\n\ntemplate \nint AREig(arcomplex EigVal[], arcomplex EigVec[], int n,\n int nnzA, arcomplex A[], int irowA[], int pcolA[],\n int nnzB, arcomplex B[], int irowB[], int pcolB[],\n arcomplex sigma, int nev, char* which = \"LM\",\n int ncv = 0, FLOAT tol = 0.0, int maxit = 0,\n arcomplex* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluNonSymMatrix > matrixA(n, nnzA, A, irowA, pcolA);\n ARluNonSymMatrix > matrixB(n, nnzB, B, irowB, pcolB);\n\n // Defining the eigenvalue 
problem.\n\n ARluCompGenEig prob(nev, matrixA, matrixB, sigma, which,\n ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues and eigenvectors.\n\n return prob.EigenValVectors(EigVec, EigVal);\n\n} // complex generalized problem, values and vectors, shift-and-invert mode.\n\n\ntemplate \nint AREig(double EigValR[], FLOAT EigValI[], int n, int nnz,\n FLOAT A[], int irow[], int pcol[], int nev, char* which = \"LM\",\n int ncv = 0, FLOAT tol = 0.0, int maxit = 0, FLOAT* resid = 0,\n bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluNonSymMatrix matrix(n, nnz, A, irow, pcol);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymStdEig prob(nev, matrix, which, ncv, tol,\n maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigValR, EigValI);\n\n} // real nonsymmetric standard problem, only eigenvalues, regular mode.\n\n\ntemplate \nint AREig(float EigValR[], FLOAT EigValI[], int n, int nnz,\n FLOAT A[], int irow[], int pcol[], int nev, char* which = \"LM\",\n int ncv = 0, FLOAT tol = 0.0, int maxit = 0, FLOAT* resid = 0,\n bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluNonSymMatrix matrix(n, nnz, A, irow, pcol);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymStdEig prob(nev, matrix, which, ncv, tol,\n maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigValR, EigValI);\n\n} // real nonsymmetric standard problem, only eigenvalues, regular mode.\n\n\ntemplate \nint AREig(FLOAT EigValR[], FLOAT EigValI[], FLOAT EigVec[], int n, int nnz,\n FLOAT A[], int irow[], int pcol[], int nev, char* which = \"LM\",\n int ncv = 0, FLOAT tol = 0.0, int maxit = 0, FLOAT* resid = 0,\n bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluNonSymMatrix matrix(n, nnz, A, irow, pcol);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymStdEig prob(nev, matrix, which, ncv, tol,\n maxit, resid, AutoShift);\n\n // Finding 
eigenvalues and eigenvectors.\n\n return prob.EigenValVectors(EigVec, EigValR, EigValI);\n\n} // real nonsymmetric standard problem, values and vectors, regular mode.\n\n\ntemplate \nint AREig(double EigValR[], FLOAT EigValI[], int n, int nnz,\n FLOAT A[], int irow[], int pcol[], FLOAT sigma, int nev,\n char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluNonSymMatrix matrix(n, nnz, A, irow, pcol);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymStdEig prob(nev, matrix, sigma, which, ncv,\n tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigValR, EigValI);\n\n} // real nonsymmetric standard problem, only eigenvalues, shift-and-invert.\n\n\ntemplate \nint AREig(float EigValR[], FLOAT EigValI[], int n, int nnz,\n FLOAT A[], int irow[], int pcol[], FLOAT sigma, int nev,\n char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluNonSymMatrix matrix(n, nnz, A, irow, pcol);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymStdEig prob(nev, matrix, sigma, which, ncv,\n tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigValR, EigValI);\n\n} // real nonsymmetric standard problem, only eigenvalues, shift-and-invert.\n\n\ntemplate \nint AREig(FLOAT EigValR[], FLOAT EigValI[], FLOAT EigVec[], int n, int nnz,\n FLOAT A[], int irow[], int pcol[], FLOAT sigma, int nev,\n char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0, int maxit = 0,\n FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluNonSymMatrix matrix(n, nnz, A, irow, pcol);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymStdEig prob(nev, matrix, sigma, which, ncv,\n tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues and eigenvectors.\n\n return 
prob.EigenValVectors(EigVec, EigValR, EigValI);\n\n} // real nonsymmetric standard problem, values and vectors, shift-and-invert.\n\n\ntemplate \nint AREig(double EigValR[], FLOAT EigValI[], int n, int nnzA,\n FLOAT A[], int irowA[], int pcolA[], int nnzB,\n FLOAT B[], int irowB[], int pcolB[], int nev,\n char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluNonSymMatrix matrixA(n, nnzA, A, irowA, pcolA);\n ARluNonSymMatrix matrixB(n, nnzB, B, irowB, pcolB);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymGenEig prob(nev, matrixA, matrixB, which,\n ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigValR, EigValI);\n\n} // real nonsymmetric generalized problem, only eigenvalues, regular mode.\n\n\ntemplate \nint AREig(float EigValR[], FLOAT EigValI[], int n, int nnzA,\n FLOAT A[], int irowA[], int pcolA[], int nnzB,\n FLOAT B[], int irowB[], int pcolB[], int nev,\n char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluNonSymMatrix matrixA(n, nnzA, A, irowA, pcolA);\n ARluNonSymMatrix matrixB(n, nnzB, B, irowB, pcolB);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymGenEig prob(nev, matrixA, matrixB, which,\n ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigValR, EigValI);\n\n} // real nonsymmetric generalized problem, only eigenvalues, regular mode.\n\n\ntemplate \nint AREig(FLOAT EigValR[], FLOAT EigValI[], FLOAT EigVec[], int n,\n int nnzA, FLOAT A[], int irowA[], int pcolA[],\n int nnzB, FLOAT B[], int irowB[], int pcolB[],\n int nev, char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluNonSymMatrix matrixA(n, nnzA, A, 
irowA, pcolA);\n ARluNonSymMatrix matrixB(n, nnzB, B, irowB, pcolB);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymGenEig prob(nev, matrixA, matrixB, which,\n ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues and eigenvectors.\n\n return prob.EigenValVectors(EigVec, EigValR, EigValI);\n\n} // real nonsymmetric generalized problem, values and vectors, regular mode.\n\n\ntemplate \nint AREig(double EigValR[], FLOAT EigValI[], int n, int nnzA,\n FLOAT A[], int irowA[], int pcolA[], int nnzB,\n FLOAT B[], int irowB[], int pcolB[], FLOAT sigma,\n int nev, char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluNonSymMatrix matrixA(n, nnzA, A, irowA, pcolA);\n ARluNonSymMatrix matrixB(n, nnzB, B, irowB, pcolB);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymGenEig prob(nev, matrixA, matrixB, sigma, which,\n ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigValR, EigValI);\n\n} // real nonsymmetric generalized problem, only eigenvalues,\n // real shift-and-invert mode.\n\n\ntemplate \nint AREig(float EigValR[], FLOAT EigValI[], int n, int nnzA,\n FLOAT A[], int irowA[], int pcolA[], int nnzB,\n FLOAT B[], int irowB[], int pcolB[], FLOAT sigma,\n int nev, char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluNonSymMatrix matrixA(n, nnzA, A, irowA, pcolA);\n ARluNonSymMatrix matrixB(n, nnzB, B, irowB, pcolB);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymGenEig prob(nev, matrixA, matrixB, sigma, which,\n ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigValR, EigValI);\n\n} // real nonsymmetric generalized problem, only eigenvalues,\n // real shift-and-invert mode.\n\n\ntemplate \nint AREig(FLOAT EigValR[], FLOAT EigValI[], 
FLOAT EigVec[], int n,\n int nnzA, FLOAT A[], int irowA[], int pcolA[], int nnzB,\n FLOAT B[], int irowB[], int pcolB[], FLOAT sigma, int nev,\n char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluNonSymMatrix matrixA(n, nnzA, A, irowA, pcolA);\n ARluNonSymMatrix matrixB(n, nnzB, B, irowB, pcolB);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymGenEig prob(nev, matrixA, matrixB, sigma, which,\n ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues and eigenvectors.\n\n return prob.EigenValVectors(EigVec, EigValR, EigValI);\n\n} // real nonsymmetric generalized problem, values and vectors,\n // real shift-and-invert mode.\n\n\ntemplate \nint AREig(FLOAT EigValR[], FLOAT EigValI[], int n, int nnzA, FLOAT A[],\n int irowA[], int pcolA[], int nnzB, FLOAT B[], int irowB[],\n int pcolB[], char part, FLOAT sigmaR, FLOAT sigmaI,\n int nev, char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluNonSymMatrix matrixA(n, nnzA, A, irowA, pcolA);\n ARluNonSymMatrix matrixB(n, nnzB, B, irowB, pcolB);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymGenEig prob(nev, matrixA, matrixB, part,\n sigmaR, sigmaI, which, ncv,\n tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigValR, EigValI);\n\n} // real nonsymmetric generalized problem, only eigenvalues,\n // complex shift-and-invert mode.\n\n\ntemplate \nint AREig(FLOAT EigValR[], FLOAT EigValI[], FLOAT EigVec[], int n, int nnzA,\n FLOAT A[], int irowA[], int pcolA[], int nnzB, FLOAT B[],\n int irowB[], int pcolB[], char part, FLOAT sigmaR, FLOAT sigmaI,\n int nev, char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluNonSymMatrix 
matrixA(n, nnzA, A, irowA, pcolA);\n ARluNonSymMatrix matrixB(n, nnzB, B, irowB, pcolB);\n\n // Defining the eigenvalue problem.\n\n ARluNonSymGenEig prob(nev, matrixA, matrixB, part,\n sigmaR, sigmaI, which, ncv,\n tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues and eigenvectors.\n\n return prob.EigenValVectors(EigVec, EigValR, EigValI);\n\n} // real nonsymmetric generalized problem, values and vectors,\n // complex shift-and-invert mode.\n\n\ntemplate \nint AREig(FLOAT EigVal[], int n, int nnz, FLOAT A[], int irow[],\n int pcol[], char uplo, int nev, char* which = \"LM\",\n int ncv = 0, FLOAT tol = 0.0, int maxit = 0,\n FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluSymMatrix matrix(n, nnz, A, irow, pcol, uplo);\n\n // Defining the eigenvalue problem.\n\n ARluSymStdEig prob(nev, matrix, which, ncv, tol,\n maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigVal);\n\n} // real symmetric standard problem, only eigenvalues, regular mode.\n\n\ntemplate \nint AREig(FLOAT EigVal[], FLOAT EigVec[], int n, int nnz, FLOAT A[],\n int irow[], int pcol[], char uplo, int nev,\n char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluSymMatrix matrix(n, nnz, A, irow, pcol, uplo);\n\n // Defining the eigenvalue problem.\n\n ARluSymStdEig prob(nev, matrix, which, ncv, tol,\n maxit, resid, AutoShift);\n\n // Finding eigenvalues and eigenvectors.\n\n return prob.EigenValVectors(EigVec, EigVal);\n\n} // real symmetric standard problem, values and vectors, regular mode.\n\n\ntemplate \nint AREig(FLOAT EigVal[], int n, int nnz, FLOAT A[], int irow[],\n int pcol[], char uplo, FLOAT sigma, int nev,\n char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluSymMatrix matrix(n, nnz, 
A, irow, pcol, uplo);\n\n // Defining the eigenvalue problem.\n\n ARluSymStdEig prob(nev, matrix, sigma, which, ncv,\n tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigVal);\n\n} // real symmetric standard problem, only eigenvalues, shift-and-invert.\n\n\ntemplate \nint AREig(FLOAT EigVal[], FLOAT EigVec[], int n, int nnz, FLOAT A[],\n int irow[], int pcol[], char uplo, FLOAT sigma,\n int nev, char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating a matrix in ARPACK++ format.\n\n ARluSymMatrix matrix(n, nnz, A, irow, pcol, uplo);\n\n // Defining the eigenvalue problem.\n\n ARluSymStdEig prob(nev, matrix, sigma, which, ncv,\n tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues and eigenvectors.\n\n return prob.EigenValVectors(EigVec, EigVal);\n\n} // real symmetric standard problem, values and vectors, shift-and-invert.\n\n\ntemplate \nint AREig(FLOAT EigVal[], int n, int nnzA, FLOAT A[], int irowA[],\n int pcolA[], int nnzB, FLOAT B[], int irowB[], int pcolB[],\n char uplo, int nev, char* which = \"LM\", int ncv = 0,\n FLOAT tol = 0.0, int maxit = 0, FLOAT* resid = 0,\n bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluSymMatrix matrixA(n, nnzA, A, irowA, pcolA, uplo);\n ARluSymMatrix matrixB(n, nnzB, B, irowB, pcolB, uplo);\n\n // Defining the eigenvalue problem.\n\n ARluSymGenEig prob(nev, matrixA, matrixB, which,\n ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigVal);\n\n} // real symmetric generalized problem, only eigenvalues, regular mode.\n\n\ntemplate \nint AREig(FLOAT EigVal[], FLOAT EigVec[], int n, int nnzA, FLOAT A[],\n int irowA[], int pcolA[], int nnzB, FLOAT B[], int irowB[],\n int pcolB[], char uplo, int nev, char* which = \"LM\",\n int ncv = 0, FLOAT tol = 0.0, int maxit = 0,\n FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices 
in ARPACK++ format.\n\n ARluSymMatrix matrixA(n, nnzA, A, irowA, pcolA, uplo);\n ARluSymMatrix matrixB(n, nnzB, B, irowB, pcolB, uplo);\n\n // Defining the eigenvalue problem.\n\n ARluSymGenEig prob(nev, matrixA, matrixB, which,\n ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues and eigenvectors.\n\n return prob.EigenValVectors(EigVec, EigVal);\n\n} // real symmetric generalized problem, values and vectors, regular mode.\n\n\ntemplate \nint AREig(FLOAT EigVal[], int n, int nnzA, FLOAT A[], int irowA[],\n int pcolA[], int nnzB, FLOAT B[], int irowB[], int pcolB[],\n char uplo, char InvertMode, FLOAT sigma, int nev,\n char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluSymMatrix matrixA(n, nnzA, A, irowA, pcolA, uplo);\n ARluSymMatrix matrixB(n, nnzB, B, irowB, pcolB, uplo);\n\n // Defining the eigenvalue problem.\n\n ARluSymGenEig prob(InvertMode, nev, matrixA, matrixB, sigma,\n which, ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues.\n\n return prob.Eigenvalues(EigVal);\n\n} // real symmetric generalized problem, only eigenvalues,\n // shift-and-invert, buckling and Cayley modes.\n\n\ntemplate \nint AREig(FLOAT EigVal[], FLOAT EigVec[], int n, int nnzA, FLOAT A[],\n int irowA[], int pcolA[], int nnzB, FLOAT B[], int irowB[],\n int pcolB[], char uplo, char InvertMode, FLOAT sigma,\n int nev, char* which = \"LM\", int ncv = 0, FLOAT tol = 0.0,\n int maxit = 0, FLOAT* resid = 0, bool AutoShift = true)\n{\n\n // Creating two matrices in ARPACK++ format.\n\n ARluSymMatrix matrixA(n, nnzA, A, irowA, pcolA, uplo);\n ARluSymMatrix matrixB(n, nnzB, B, irowB, pcolB, uplo);\n\n // Defining the eigenvalue problem.\n\n ARluSymGenEig prob(InvertMode, nev, matrixA, matrixB, sigma,\n which, ncv, tol, maxit, resid, AutoShift);\n\n // Finding eigenvalues and eigenvectors.\n\n return prob.EigenValVectors(EigVec, EigVal);\n\n} // real 
symmetric generalized problem, values and vectors,\n // shift-and-invert, buckling and Cayley modes.\n\n\n\ntemplate\nvoid SymmetricMatrixA(INT nx, INT& n, INT& nnz, FLOAT* &A, \n INT* &irow, INT* &pcol, char uplo = 'L')\n\n{\n\n // Defining internal variables.\n\n INT i, j;\n FLOAT h2, df, dd;\n\n // Defining constants.\n\n h2 = 1.0/(FLOAT(nx+1)*FLOAT(nx+1));\n dd = 4.0/h2;\n df = -1.0/h2;\n\n // Defining the number of columns and nonzero elements of matrix.\n\n n = nx*nx;\n nnz = 3*n-2*nx;\n\n // Creating output vectors.\n\n A = new FLOAT[nnz];\n irow = new INT[nnz];\n pcol = new INT[n+1];\n\n // Defining matrix A.\n\n pcol[0] = 0;\n i = 0;\n\n if (uplo == 'U') {\n\n for (j = 0; j < n; j++) {\n if (j >= nx) {\n A[i] = df; irow[i++] = j-nx;\n }\n if ((j%nx) != 0) {\n A[i] = df; irow[i++] = j-1;\n }\n A[i] = dd; irow[i++] = j;\n pcol[j+1] = i;\n }\n\n }\n else {\n\n for (j = 0; j < n; j++) {\n A[i] = dd; irow[i++] = j;\n if (((j+1)%nx) != 0) {\n A[i] = df; irow[i++] = j+1;\n }\n if (j < n-nx) {\n A[i] = df; irow[i++] = j+nx;\n }\n pcol[j+1] = i;\n }\n\n }\n\n} // SymmetricMatrixA.\n\r\n\ntemplate\nvoid Solution(INT nconv, INT n, INT nnz, FLOAT A[], INT irow[], INT pcol[],\n char uplo, FLOAT EigVal[], FLOAT* EigVec = 0)\n/*\n Prints eigenvalues and eigenvectors of symmetric eigen-problems\n on standard \"cout\" stream.\n*/\n\n{\n\n INT i;\n FLOAT* Ax;\n FLOAT* ResNorm;\n ARluSymMatrix matrix(n, nnz, A, irow, pcol, uplo);\n\n cout << endl << endl << \"Testing ARPACK++ function AREig\" << endl;\n cout << \"Real symmetric eigenvalue problem: A*x - lambda*x \\n \\n\";\n\n cout << \"Dimension of the system : \" << n << endl;\n cout << \"Number of 'converged' eigenvalues : \" << nconv << endl << endl;\n\n // Printing eigenvalues.\n\n cout << \"Eigenvalues:\" << endl;\n\n for (i=0; i\nvoid Solution(INT nconv, INT n, INT nnzA, FLOAT A[], INT irowA[],\n INT pcolA[], INT nnzB, FLOAT B[], INT irowB[], INT pcolB[],\n char uplo, FLOAT EigVal[], FLOAT* EigVec = 0)\n/*\n Prints 
eigenvalues and eigenvectors of symmetric generalized\n eigen-problem on standard \"cout\" stream.\n*/\n\n{\n\n INT i;\n FLOAT *Ax, *Bx;\n FLOAT *ResNorm;\n ARluSymMatrix matrixA(n, nnzA, A, irowA, pcolA, uplo);\n ARluSymMatrix matrixB(n, nnzB, B, irowB, pcolB, uplo);\n\n cout << endl << endl << \"Testing ARPACK++ function AREig\" << endl;\n cout << \"Real symmetric generalized eigenvalue problem: A*x - lambda*B*x\";\n cout << endl << endl;\n\n cout << \"Dimension of the system : \" << n << endl;\n cout << \"Number of 'converged' eigenvalues : \" << nconv << endl << endl;\n\n // Printing eigenvalues.\n\n cout << \"Eigenvalues:\" << endl;\n\n for (i=0; iadd( BOOST_TEST_CASE( &check_eigen_solve ), 0);\r\n}\r\n", "meta": {"hexsha": "e2431779b65b05da70fd9e63c9f1df94119bba21", "size": 30367, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "C++/test/check_eigen_solve.cpp", "max_stars_repo_name": "JohnReid/biopsy", "max_stars_repo_head_hexsha": "1eeb714ba5b53f2ecf776d865d32e2078cbc0338", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "C++/test/check_eigen_solve.cpp", "max_issues_repo_name": "JohnReid/biopsy", "max_issues_repo_head_hexsha": "1eeb714ba5b53f2ecf776d865d32e2078cbc0338", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "C++/test/check_eigen_solve.cpp", "max_forks_repo_name": "JohnReid/biopsy", "max_forks_repo_head_hexsha": "1eeb714ba5b53f2ecf776d865d32e2078cbc0338", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0961347869, "max_line_length": 78, "alphanum_fraction": 0.6178417361, "num_tokens": 9336, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7549149978955811, "lm_q2_score": 0.66192288918838, "lm_q1q2_score": 0.49969551649868293}} {"text": "#pragma once\n\n#include \n#include \n\nint calculateInt()\n{\n int number = 22;\n for (int i=1; i<20; ++i)\n {\n number = i * number / 4;\n }\n return number;\n}\n\nEigen::Vector3f calculateVec()\n{\n auto x = Eigen::Vector3f{1, 0, 0};\n auto y = Eigen::Vector3f{0, 1, 0};\n return x.cross(y);\n}", "meta": {"hexsha": "fd51589e8248e3018db6c716a8963535e8752d75", "size": 333, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "test/mini-project/include/smallfunctions.hpp", "max_stars_repo_name": "thautwarm/clang-build", "max_stars_repo_head_hexsha": "79cc6bd8e17a328d9e6a0fbdada2ba88600423aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-02-28T10:16:43.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-08T22:12:45.000Z", "max_issues_repo_path": "test/mini-project/include/smallfunctions.hpp", "max_issues_repo_name": "thautwarm/clang-build", "max_issues_repo_head_hexsha": "79cc6bd8e17a328d9e6a0fbdada2ba88600423aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 23.0, "max_issues_repo_issues_event_min_datetime": "2018-02-25T21:46:37.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-09T20:47:15.000Z", "max_forks_repo_path": "test/mini-project/include/smallfunctions.hpp", "max_forks_repo_name": "thautwarm/clang-build", "max_forks_repo_head_hexsha": "79cc6bd8e17a328d9e6a0fbdada2ba88600423aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.8571428571, "max_line_length": 38, "alphanum_fraction": 0.5615615616, "num_tokens": 112, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7549149868676284, "lm_q2_score": 0.6619228825191872, "lm_q1q2_score": 0.49969550416435493}} {"text": "#include \n#include \n#include \n#include \n#include \n\n int cc_find_new_j(double val [],\n int vct,\n double as [],\n int nas,\n double tol,\n double res []);\n \n \n void cc_solve_stomp(\n double * a,\n double * y,\n int * asize,\n int * ressize,\n\t double OptTol,\n\t int maxIters,\n int *niter,\n int *naSet,\n double *asol,\n\t double *activeSet,\n\t double *aresult);\n \n double cmeana_ms(double * vals, int size,double * res);\n ", "meta": {"hexsha": "0f1355af65daf74208e9eaa2069de6220f46a8f7", "size": 632, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "swinzip-v2.0/src/StOMP/ccstomp.hpp", "max_stars_repo_name": "msalloum80/SWinzip", "max_stars_repo_head_hexsha": "5d43e9f11776d513218b891683b7aa00b36fae23", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-05-17T07:58:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-17T07:58:23.000Z", "max_issues_repo_path": "swinzip-v2.5/src/StOMP/ccstomp.hpp", "max_issues_repo_name": "msalloum80/SWinzip", "max_issues_repo_head_hexsha": "5d43e9f11776d513218b891683b7aa00b36fae23", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "swinzip-v2.5/src/StOMP/ccstomp.hpp", "max_forks_repo_name": "msalloum80/SWinzip", "max_forks_repo_head_hexsha": "5d43e9f11776d513218b891683b7aa00b36fae23", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-05-05T20:18:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-05T20:18:25.000Z", "avg_line_length": 21.7931034483, "max_line_length": 57, "alphanum_fraction": 0.5553797468, "num_tokens": 160, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7549149868676283, "lm_q2_score": 0.6619228691808012, "lm_q1q2_score": 0.49969549409500735}} {"text": "//==============================================================================\n// Copyright 2003 - 2011 LASMEA UMR 6602 CNRS/Univ. Clermont II\n// Copyright 2009 - 2011 LRI UMR 8623 CNRS/Univ Paris Sud XI\n//\n// Distributed under the Boost Software License, Version 1.0.\n// See accompanying file LICENSE.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt\n//==============================================================================\n#ifndef NT2_EXPONENTIAL_FUNCTIONS_GENERIC_POW2_HPP_INCLUDED\n#define NT2_EXPONENTIAL_FUNCTIONS_GENERIC_POW2_HPP_INCLUDED\n\n#include \n#include \n#include \n#include \n#include \n#include \n\n#ifndef BOOST_SIMD_NO_INVALIDS\n#include \n#include \n#endif\n\nnamespace nt2 { namespace ext\n{\n BOOST_DISPATCH_IMPLEMENT ( pow2_, tag::cpu_\n , (A0)(A1)\n , (generic_< floating_ >)\n (generic_< integer_ >)\n )\n {\n typedef A0 result_type;\n\n NT2_FUNCTOR_CALL(2)\n {\n return nt2::fast_ldexp(a0, a1);\n }\n };\n\n BOOST_DISPATCH_IMPLEMENT ( pow2_, tag::cpu_\n , (A0)\n , (generic_< floating_ >)\n (generic_< floating_ >)\n )\n {\n\n typedef A0 result_type;\n\n NT2_FUNCTOR_CALL_REPEAT(2)\n {\n #ifndef BOOST_SIMD_NO_INVALIDS\n BOOST_ASSERT_MSG(boost::simd::assert_all(is_finite(a1)),\n \"pow2 is not defined for an invalid second parameter\");\n #endif\n return nt2::fast_ldexp(a0, nt2::toint(a1));\n }\n };\n\n BOOST_DISPATCH_IMPLEMENT ( pow2_, tag::cpu_\n , (A0)(A1)\n , (generic_< integer_ >)\n (generic_< integer_ >)\n )\n {\n\n typedef A0 result_type;\n\n NT2_FUNCTOR_CALL_REPEAT(2)\n {\n return nt2::fast_ldexp(a0, a1);\n }\n };\n\n BOOST_DISPATCH_IMPLEMENT ( pow2_, tag::cpu_\n , (A0)\n , (generic_< integer_ >)\n )\n {\n\n typedef A0 result_type;\n\n NT2_FUNCTOR_CALL_REPEAT(1)\n {\n return nt2::fast_ldexp(One(), a0);\n }\n };\n\n BOOST_DISPATCH_IMPLEMENT ( pow2_, tag::cpu_\n , (A0)\n , (generic_< floating_ >)\n )\n 
{\n\n typedef A0 result_type;\n\n NT2_FUNCTOR_CALL_REPEAT(1)\n {\n #ifndef BOOST_SIMD_NO_INVALIDS\n BOOST_ASSERT_MSG(boost::simd::assert_all(is_finite(a0)),\n \"pow2 with one parameter is not defined for an invalid entry\");\n #endif\n return nt2::fast_ldexp(One(), toint(a0));\n }\n };\n} }\n\n#endif\n", "meta": {"hexsha": "78f84418f6079cfe4268b7e419541a64c6058edd", "size": 3129, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "modules/core/exponential/include/nt2/exponential/functions/generic/pow2.hpp", "max_stars_repo_name": "psiha/nt2", "max_stars_repo_head_hexsha": "5e829807f6b57b339ca1be918a6b60a2507c54d0", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 34.0, "max_stars_repo_stars_event_min_datetime": "2017-05-19T18:10:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-04T02:18:13.000Z", "max_issues_repo_path": "modules/core/exponential/include/nt2/exponential/functions/generic/pow2.hpp", "max_issues_repo_name": "psiha/nt2", "max_issues_repo_head_hexsha": "5e829807f6b57b339ca1be918a6b60a2507c54d0", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/core/exponential/include/nt2/exponential/functions/generic/pow2.hpp", "max_forks_repo_name": "psiha/nt2", "max_forks_repo_head_hexsha": "5e829807f6b57b339ca1be918a6b60a2507c54d0", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-12-02T12:59:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-31T12:46:14.000Z", "avg_line_length": 28.9722222222, "max_line_length": 86, "alphanum_fraction": 0.5170981144, "num_tokens": 733, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7549149868676283, "lm_q2_score": 0.661922862511608, "lm_q1q2_score": 0.4996954890603335}} {"text": "/*\n * Copyright 2020 Adobe. 
All rights reserved.\n * This file is licensed to you under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License. You may obtain a copy\n * of the License at http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software distributed under\n * the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS\n * OF ANY KIND, either express or implied. See the License for the specific language\n * governing permissions and limitations under the License.\n */\n#include \n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n\nTEST_CASE(\"ComputeMeshCovariance\", \"[mesh][covariance]\")\n{\n using namespace lagrange;\n\n Vertices3D ref_vertices(6 + 3, 3);\n Triangles facets(4 + 1, 3);\n\n const double a = 0.5;\n const double b = 2.0;\n const double large_number = 1000;\n\n ref_vertices.row(0) << -a / 2., -b / 2., 0;\n ref_vertices.row(1) << +a / 2., -b / 2., 0;\n ref_vertices.row(2) << +a / 2., 0, 0;\n ref_vertices.row(3) << +a / 2., b / 4., 0;\n ref_vertices.row(4) << +a / 2., b / 2., 0;\n ref_vertices.row(5) << -a / 2., b / 2., 0;\n // Don't include in computation\n ref_vertices.row(6) << large_number, large_number, large_number;\n ref_vertices.row(7) << -large_number, -large_number, -large_number;\n ref_vertices.row(8) << 2 * large_number, 2 * large_number, 2 * large_number;\n\n\n facets.row(0) << 0, 1, 2;\n // Don't include in computation\n facets.row(1) << 6, 7, 8;\n //\n facets.row(2) << 0, 2, 3;\n facets.row(3) << 0, 3, 4;\n facets.row(4) << 0, 4, 5;\n\n // Reference values without transformations\n const double ref_area = a * b;\n Eigen::Vector3d ref_center(0, 0, 0);\n Eigen::Matrix3d ref_covariance = Eigen::Matrix3d::Zero();\n ref_covariance(0, 0) = b * a * a * a / 12.;\n ref_covariance(1, 1) = a * b * b * b / 12.;\n\n // Translate\n // Eigen::Vector3d tr(0,0,0);\n Eigen::Vector3d tr(-1, 3, 
4);\n\n // Rotate\n // Eigen::Matrix3d rot = Eigen::Matrix3d::Identity();\n Eigen::Matrix3d rot =\n Eigen::AngleAxisd(1.2365, Eigen::Vector3d(-1, 2, 5.1).normalized()).toRotationMatrix();\n Eigen::Matrix3d rot_covariance = rot * ref_covariance * rot.transpose();\n Eigen::Matrix3d rot_covariance_tr = rot_covariance + ref_area * tr * tr.transpose();\n\n // Create the vertices\n Vertices3D vertices = (ref_vertices * rot.transpose()).rowwise() + tr.transpose();\n\n auto mesh_unique = lagrange::create_mesh(vertices, facets);\n auto out_covariance_at_zero =\n compute_mesh_covariance(*mesh_unique, Eigen::RowVector3d::Zero(), {0, 2, 3, 4});\n auto out_covariance_at_centroid =\n compute_mesh_covariance(*mesh_unique, tr.transpose(), {0, 2, 3, 4});\n\n CHECK((out_covariance_at_zero - rot_covariance_tr).norm() == Approx(0.).margin(1e-10));\n CHECK((out_covariance_at_centroid - rot_covariance).norm() == Approx(0.).margin(1e-10));\n\n // std::cout << out_covariance_at_zero.covariance << std::endl;\n // std::cout << rot_covariance_tr << std::endl;\n\n\n} // end of TEST\n", "meta": {"hexsha": "1c5bf103f45115cd4f7e7173c097f8dbac7a96b2", "size": 3330, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "modules/core/tests/test_compute_mesh_covariance.cpp", "max_stars_repo_name": "LaudateCorpus1/lagrange", "max_stars_repo_head_hexsha": "2a49d3ee93c1f1e712c93c5c87ea25b9a83c8f40", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 156.0, "max_stars_repo_stars_event_min_datetime": "2021-01-08T19:53:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T18:32:52.000Z", "max_issues_repo_path": "modules/core/tests/test_compute_mesh_covariance.cpp", "max_issues_repo_name": "LaudateCorpus1/lagrange", "max_issues_repo_head_hexsha": "2a49d3ee93c1f1e712c93c5c87ea25b9a83c8f40", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-01-11T20:18:07.000Z", "max_issues_repo_issues_event_max_datetime": 
"2021-08-04T15:53:57.000Z", "max_forks_repo_path": "modules/core/tests/test_compute_mesh_covariance.cpp", "max_forks_repo_name": "LaudateCorpus1/lagrange", "max_forks_repo_head_hexsha": "2a49d3ee93c1f1e712c93c5c87ea25b9a83c8f40", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2021-01-11T21:03:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T06:27:44.000Z", "avg_line_length": 37.0, "max_line_length": 95, "alphanum_fraction": 0.657957958, "num_tokens": 1012, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7799929104825007, "lm_q2_score": 0.640635861701035, "lm_q1q2_score": 0.4996914303276552}} {"text": "/*\n@copyright Louis Dionne 2014\nDistributed under the Boost Software License, Version 1.0.\n(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)\n */\n\n#include \n#include \n#include \n#include \nusing namespace boost::hana;\n\n\nint main() {\n //! [main]\n BOOST_HANA_CONSTEXPR_LAMBDA auto is_permutation_of = curry<2>([](auto xs, auto perm) {\n return elem(permutations(xs), perm);\n });\n\n BOOST_HANA_CONSTEXPR_ASSERT(\n all(\n list(\n list('1', 2, 3.0),\n list('1', 3.0, 2),\n list(2, '1', 3.0),\n list(2, 3.0, '1'),\n list(3.0, '1', 2),\n list(3.0, 2, '1')\n ),\n is_permutation_of(list('1', 2, 3.0))\n )\n );\n //! 
[main]\n}\n", "meta": {"hexsha": "60baecd7afed5023868e459712f696b4298e7f0c", "size": 910, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "example/list/permutations.cpp", "max_stars_repo_name": "rbock/hana", "max_stars_repo_head_hexsha": "2b76377f91a5ebe037dea444e4eaabba6498d3a8", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2015-05-07T14:29:13.000Z", "max_stars_repo_stars_event_max_datetime": "2015-07-04T10:59:46.000Z", "max_issues_repo_path": "example/list/permutations.cpp", "max_issues_repo_name": "rbock/hana", "max_issues_repo_head_hexsha": "2b76377f91a5ebe037dea444e4eaabba6498d3a8", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example/list/permutations.cpp", "max_forks_repo_name": "rbock/hana", "max_forks_repo_head_hexsha": "2b76377f91a5ebe037dea444e4eaabba6498d3a8", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0, "max_line_length": 90, "alphanum_fraction": 0.5494505495, "num_tokens": 257, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES\n\n", "lm_q1_score": 0.7799929002541068, "lm_q2_score": 0.640635861701035, "lm_q1q2_score": 0.4996914237749792}} {"text": "#include \n#include \n#include \n#include \n#include \n\n// ************************************************************************* //\n// ******************************** DATA IO ******************************** //\n// ************************************************************************* //\n\nGravitysolver::DataIO::DataIO()\n{\n epsilon = 0.0;\n}\n\nbool Gravitysolver::DataIO::readDataOld(const std::string &filename)\n{\n std::ifstream infile(filename);\n int numParticles, numGasParticles, numStarParticles;\n\n if(infile.is_open()) {\n infile >> numParticles >> numGasParticles >> numStarParticles;\n\n particles = Eigen::MatrixXf::Zero(MATRIX_DATA_ROWS, numParticles);\n\n for(int i = 0; i < numParticles; i++) {\n infile >> particles(0, i); // m\n }\n\n for(int i = 0; i < numParticles; i++) {\n infile >> particles(1, i); // x\n }\n\n for(int i = 0; i < numParticles; i++) {\n infile >> particles(2, i); // y\n }\n\n for(int i = 0; i < numParticles; i++) {\n infile >> particles(3, i); // z\n }\n\n for(int i = 0; i < numParticles; i++) {\n infile >> particles(4, i); // vx\n }\n\n for(int i = 0; i < numParticles; i++) {\n infile >> particles(5, i); // vy\n }\n\n for(int i = 0; i < numParticles; i++) {\n infile >> particles(6, i); // vz\n }\n\n return true;\n }\n\n std::cout << \"Gravitysolver::DataIO::readDataOld(\\\"\" << filename << \"\\\") \" << \"could not read file\" << std::endl;\n return false;\n}\n\nbool Gravitysolver::DataIO::readData(const std::string &filename)\n{\n std::ifstream infile(filename);\n int N;\n\n if(infile.is_open()) {\n infile >> N >> epsilon;\n particles = Eigen::MatrixXf::Zero(MATRIX_DATA_ROWS, N);\n\n float m, x, y, z, vx, vy, vz, fx, fy, fz;\n\n for(int j = 0; j < N; j++) {\n infile >> m >> x >> y >> z >> vx >> vy >> vz >> fx >> fy >> fz;\n particles(0, j) = m;\n particles(1, j) = x;\n particles(2, j) = y;\n 
particles(3, j) = z;\n particles(4, j) = vx;\n particles(5, j) = vy;\n particles(6, j) = vz;\n particles(7, j) = fx;\n particles(8, j) = fy;\n particles(9, j) = fz;\n }\n\n return true;\n }\n\n std::cout << \"Gravitysolver::DataIO::readData(\\\"\" << filename << \"\\\") \" << \"could not read file\" << std::endl;\n return false;\n}\n\nbool Gravitysolver::DataIO::writeData(const std::string &filename)\n{\n std::ofstream outfile(filename);\n int N = particles.cols();\n\n if(outfile.is_open()) {\n outfile << N << \" \" << epsilon << \"\\n\";\n\n for(int j = 0; j < N; j++) {\n outfile << particles(0, j) << \"\\n\"; // m\n outfile << particles(1, j) << \"\\n\"; // x\n outfile << particles(2, j) << \"\\n\"; // y\n outfile << particles(3, j) << \"\\n\"; // z\n outfile << particles(4, j) << \"\\n\"; // vx\n outfile << particles(5, j) << \"\\n\"; // vy\n outfile << particles(6, j) << \"\\n\"; // vz\n outfile << particles(7, j) << \"\\n\"; // fx\n outfile << particles(8, j) << \"\\n\"; // fy\n outfile << particles(9, j) << \"\\n\"; // fz\n }\n\n return true;\n }\n\n std::cout << \"Gravitysolver::DataIO::writeData(\\\"\" << filename << \"\\\") \" << \"could not write file\" << std::endl;\n return false;\n}\n\n// ************************************************************************* //\n// ***************************** DIRECT SOLVER ***************************** //\n// ************************************************************************* //\n\nGravitysolver::Direct::Direct()\n{\n\n}\n\nfloat Gravitysolver::Direct::softening()\n{\n return epsilon;\n}\n\nvoid Gravitysolver::Direct::setSoftening(float eps)\n{\n epsilon = eps;\n}\n\nvoid Gravitysolver::Direct::solve()\n{\n for(int i = 0; i < particles.cols(); i++) {\n particles(7, i) = .0;\n particles(8, i) = .0;\n particles(9, i) = .0;\n }\n\n float mi, mj, dx, dy, dz, sqrtInvDist, sqrtInvDist3, fx, fy, fz;\n\n for(int i = 0; i < particles.cols(); i++) {\n for(int j = i + 1; j < particles.cols(); j++) {\n mi = particles(0, 
i);\n mj = particles(0, j);\n dx = particles(1, i) - particles(1, j);\n dy = particles(2, i) - particles(2, j);\n dz = particles(3, i) - particles(3, j);\n\n sqrtInvDist = 1.0 / std::sqrt(dx * dx + dy * dy + dz * dz + epsilon * epsilon);\n sqrtInvDist3 = sqrtInvDist * sqrtInvDist * sqrtInvDist;\n\n // Assumption: G = 1\n fx = -mi * mj * dx * sqrtInvDist3;\n fy = -mi * mj * dy * sqrtInvDist3;\n fz = -mi * mj * dz * sqrtInvDist3;\n\n particles(7, i) += fx;\n particles(8, i) += fy;\n particles(9, i) += fz;\n particles(7, j) -= fx;\n particles(8, j) -= fy;\n particles(9, j) -= fz;\n }\n }\n}\n\nconst MatrixData &Gravitysolver::Direct::data()\n{\n return particles;\n}\n\n// ************************************************************************* //\n// ******************************* PM SOLVER ******************************* //\n// ************************************************************************* //\n\nGravitysolver::PM::PM(int numGridCells)\n{\n // The Tensor in Eigen experiences a strange case of std::bad_alloc()\n // if it has more than 776 = 2*388 complex valued elements per dimension\n if(numGridCells > 0 && numGridCells <= 388) {\n Ng = numGridCells;\n }\n else {\n std::cout << \"Gravitysolver::PM::PM(\" << numGridCells << \") numGridCells must be > 0\" << std::endl;\n Ng = 1;\n }\n\n h = .0;\n}\n\n/**\n* Returns indices for a mesh cell for the galaxy given world coordinates.\n* World coordinates have the center of the galaxy at (0, 0, 0).\n* The mapping to grid coordinates is an affine mapping where the galaxy center\n* (0, 0, 0) is in the center of the mesh as well.\n*/\nVector3i Gravitysolver::PM::worldToGrid(float x, float y, float z)\n{\n Vector3i gridCoor;\n\n gridCoor(0) = std::floor((x + (worldLen / 2)) / worldLen * Ng);\n gridCoor(1) = std::floor((y + (worldLen / 2)) / worldLen * Ng);\n gridCoor(2) = std::floor((z + (worldLen / 2)) / worldLen * Ng);\n\n return gridCoor;\n}\n\n/**\n* Returns position at the center of a mesh cell in world 
coordinates given cell indices\n* The position is mapped in a fashin such that (0, 0, 0) is the center of the galaxy\n* in world coordinates.\n*/\nVector3f Gravitysolver::PM::gridToWorld(int i, int j, int k)\n{\n Vector3f worldCoor;\n\n worldCoor(0) = (i * worldLen / Ng) - (worldLen / 2) + (h / 2);\n worldCoor(1) = (j * worldLen / Ng) - (worldLen / 2) + (h / 2);\n worldCoor(2) = (k * worldLen / Ng) - (worldLen / 2) + (h / 2);\n\n return worldCoor;\n}\n\nvoid Gravitysolver::PM::fft3d(FieldTensorCF &t)\n{\n const int x = t.dimension(0);\n const int y = t.dimension(1);\n const int z = t.dimension(2);\n\n Eigen::FFT fft;\n\n for(int k = 0; k < z; k++) { // for each 2d sheet make a 2d fft\n for(int j = 0; j < y; j++) { // fft in x-dir\n VectorXcf tv(x);\n for(int i = 0; i < x; i++)\n tv(i) = t(i, j, k);\n\n VectorXcf fv = fft.fwd(tv);\n for(int i = 0; i < x; i++)\n t(i, j, k) = fv(i);\n }\n\n for(int i = 0; i < x; i++) { // fft in y-dir\n VectorXcf tv(y);\n for(int j = 0; j < y; j++)\n tv(j) = t(i, j, k);\n\n VectorXcf fv = fft.fwd(tv);\n for(int j = 0; j < y; j++)\n t(i, j, k) = fv(j);\n }\n }\n\n for(int i = 0; i < x; i++) { // and for each of the x*y spikes pointing upwards in z-dir do a 1D fft\n for(int j = 0; j < y; j++) {\n\n VectorXcf tv(z);\n for(int k = 0; k < z; k++)\n tv(k) = t(i, j, k);\n\n VectorXcf fv = fft.fwd(tv);\n for(int k = 0; k < z; k++)\n t(i, j, k) = fv(k);\n }\n }\n}\n\nvoid Gravitysolver::PM::ifft3d(FieldTensorCF &t)\n{\n const int x = t.dimension(0);\n const int y = t.dimension(1);\n const int z = t.dimension(2);\n\n const float invXYZ = 1.0 / (x*y*z);\n\n for(int i = 0; i < x; i++) {\n for(int j = 0; j < y; j++) {\n for(int k = 0; k < z; k++) {\n t(i,j,k) = std::conj(t(i,j,k));\n }\n }\n }\n\n fft3d(t);\n\n for(int i = 0; i < x; i++) {\n for(int j = 0; j < y; j++) {\n for(int k = 0; k < z; k++) {\n t(i,j,k) = std::conj(t(i,j,k)) * invXYZ;\n }\n }\n }\n}\n\nvoid Gravitysolver::PM::conv3d(FieldTensorCF &out, FieldTensorCF &in, FieldTensorCF 
&kernel)\n{\n int x = in.dimension(0);\n int y = in.dimension(1);\n int z = in.dimension(2);\n\n fft3d(in);\n fft3d(kernel);\n\n for(int i = 0; i < x; i++) {\n for(int j = 0; j < y; j++) {\n for(int k = 0; k < z; k++) {\n out(i,j,k) = in(i,j,k) * kernel(i,j,k);\n }\n }\n }\n\n ifft3d(out);\n}\n\nvoid Gravitysolver::PM::solve()\n{\n const int N = particles.cols();\n Vector3f maxPos = particles.block(1, 0, 3, N).rowwise().maxCoeff();\n Vector3f minPosAbs = particles.block(1, 0, 3, N).rowwise().minCoeff().cwiseAbs();\n\n worldLen = std::max(maxPos.maxCoeff(), minPosAbs.maxCoeff()) * 2;\n\n // adds one layer of cells in each dimension so particles at the edge will\n // contribute with their entire mass to the density field\n worldLen += (worldLen / Ng);\n h = worldLen / Ng;\n\n std::cout << \"PM solver global parameters\" << std::endl;\n std::cout << \"---------------------------\" << std::endl;\n std::cout << \"worldLen: \" << worldLen << std::endl;\n std::cout << \"Ng: \" << Ng << std::endl;\n std::cout << \"h: \" << h << std::endl;\n std::cout << \"---------------------------\" << std::endl;\n\n density.resize(2*Ng, 2*Ng, 2*Ng);\n greenFunction.resize(2*Ng, 2*Ng, 2*Ng);\n potential.resize(2*Ng, 2*Ng, 2*Ng);\n\n ax.resize(Ng, Ng, Ng);\n ay.resize(Ng, Ng, Ng);\n az.resize(Ng, Ng, Ng);\n\n // density field construction\n for(int col = 0; col < particles.cols(); col++) {\n float m = particles(0, col);\n float px = particles(1, col);\n float py = particles(2, col);\n float pz = particles(3, col);\n\n // (NGP)\n Vector3i ti = worldToGrid(px, py, pz);\n density(ti(0), ti(1), ti(2)) += (m / (h*h*h));\n }\n\n // green's function construction\n for(int i = 0; i < Ng; i++) {\n for(int j = 0; j < Ng; j++) {\n for(int k = 0; k < Ng; k++) {\n Vector3f worldPos = gridToWorld(i, j, k);\n\n // Assumption: G = 1\n float res = -1.0 / worldPos.norm();\n\n // make the function symmetric accross all axes\n greenFunction(i, j, k) = res;\n greenFunction(i, j, 2*Ng-k-1) = res;\n 
greenFunction(i, 2*Ng-j-1, k) = res;\n greenFunction(i, 2*Ng-j-1, 2*Ng-k-1) = res;\n greenFunction(2*Ng-i-1, j, k) = res;\n greenFunction(2*Ng-i-1, j, 2*Ng-k-1) = res;\n greenFunction(2*Ng-i-1, 2*Ng-j-1, k) = res;\n greenFunction(2*Ng-i-1, 2*Ng-j-1, 2*Ng-k-1) = res;\n }\n }\n }\n\n // solve the poisson equation to get the potential\n conv3d(potential, density, greenFunction);\n\n // calculate acceleration field from potential\n for(int i = 1; i < Ng - 1; i++) {\n for(int j = 1; j < Ng - 1; j++) {\n for(int k = 1; k < Ng - 1; k++) {\n ax(i, j, k) = -(potential(i+1,j,k).real() - potential(i-1,j,k).real()) / (2*h);\n ay(i, j, k) = -(potential(i,j+1,k).real() - potential(i,j-1,k).real()) / (2*h);\n az(i, j, k) = -(potential(i,j,k+1).real() - potential(i,j,k-1).real()) / (2*h);\n }\n }\n }\n\n // interpolate acceleration field back from mesh to particles\n for(int col = 0; col < particles.cols(); col++) {\n float m = particles(0, col);\n float px = particles(1, col);\n float py = particles(2, col);\n float pz = particles(3, col);\n\n // (NGP)\n Vector3i ti = worldToGrid(px, py, pz);\n\n // F = m*a\n particles(7, col) = m * ax(ti(0), ti(1), ti(2));\n particles(8, col) = m * ay(ti(0), ti(1), ti(2));\n particles(9, col) = m * az(ti(0), ti(1), ti(2));\n }\n}\n\nconst MatrixData &Gravitysolver::PM::data()\n{\n return particles;\n}\n", "meta": {"hexsha": "7d248a2145d586756baf4afab8bf0103be5ab9b5", "size": 11723, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/gravitysolvers.cpp", "max_stars_repo_name": "azurite/AST-245-N-Body", "max_stars_repo_head_hexsha": "cc3e3acd61f62415c1e5f40c8aba5b93703837fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/gravitysolvers.cpp", "max_issues_repo_name": "azurite/AST-245-N-Body", "max_issues_repo_head_hexsha": "cc3e3acd61f62415c1e5f40c8aba5b93703837fa", "max_issues_repo_licenses": ["MIT"], 
"max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/gravitysolvers.cpp", "max_forks_repo_name": "azurite/AST-245-N-Body", "max_forks_repo_head_hexsha": "cc3e3acd61f62415c1e5f40c8aba5b93703837fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8456057007, "max_line_length": 115, "alphanum_fraction": 0.5033694447, "num_tokens": 3722, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7799929002541068, "lm_q2_score": 0.640635861701035, "lm_q1q2_score": 0.4996914237749792}} {"text": "#pragma once\n#include \n#include \n#include \"Bound3Intersect.hpp\"\n#include \"../Ray.hpp\"\n\nclass Bound3\n{\npublic:\n Bound3(Eigen::Vector3f min, Eigen::Vector3f max) : _min(min), _max(max){};\n\n Bound3Intersect intersect_ray(const Ray &ray) const\n {\n const auto &origin = ray.get_origin();\n const auto &dir = ray.get_direction();\n\n auto tIn = (_min - origin).cwiseQuotient(dir).minCoeff();\n auto tOut = (_max - origin).cwiseQuotient(dir).maxCoeff();\n\n if (tIn < std::numeric_limits::epsilon())\n return Bound3Intersect(0.0f, tOut);\n\n if (tOut - tIn > -std::numeric_limits::epsilon())\n return Bound3Intersect(tIn, tOut);\n\n return Bound3Intersect();\n };\n\n static std::unique_ptr union_bound3(const std::unique_ptr &box1, const std::unique_ptr &box2)\n {\n auto min = box1->_min.cwiseMin(box2->_min);\n auto max = box1->_max.cwiseMax(box2->_max);\n return std::make_unique(min, max);\n };\n\n const Eigen::Vector3f &min() { return _min; };\n\n const Eigen::Vector3f &max() { return _max; };\n\nprivate:\n Eigen::Vector3f _min;\n\n Eigen::Vector3f _max;\n};\n", "meta": {"hexsha": "4bea8e11303fa1eb343e92c67642bfc20641e95c", "size": 1223, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/render/accelerate/Bound3.hpp", 
"max_stars_repo_name": "yzx9/NeuronSdfViewer", "max_stars_repo_head_hexsha": "454164dfccf80b806aac3cd7cca09e2cb8bd3c2a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-12-31T10:29:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T10:29:56.000Z", "max_issues_repo_path": "src/render/accelerate/Bound3.hpp", "max_issues_repo_name": "yzx9/NeuronSdfViewer", "max_issues_repo_head_hexsha": "454164dfccf80b806aac3cd7cca09e2cb8bd3c2a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/render/accelerate/Bound3.hpp", "max_forks_repo_name": "yzx9/NeuronSdfViewer", "max_forks_repo_head_hexsha": "454164dfccf80b806aac3cd7cca09e2cb8bd3c2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1777777778, "max_line_length": 121, "alphanum_fraction": 0.6287816844, "num_tokens": 347, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.779992900254107, "lm_q2_score": 0.640635854839898, "lm_q1q2_score": 0.49969141842334114}} {"text": "/*\n * SWE_Plane_Normal_Modes.hpp\n *\n * Created on: 17 Nov 2019\n * Author: Pedro Peixoto \n *\n * based on previous implementation by Martin Schreiber in swe_plane.cpp\n *\n */\n\n#ifndef SRC_PROGRAMS_SWE_PLANE_NORMAL_MODES_HPP_\n#define SRC_PROGRAMS_SWE_PLANE_NORMAL_MODES_HPP_\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#if SWEET_EIGEN\n#include \n#endif\n/**\n * SWE Plane normal mode\n */\nclass SWE_Plane_Normal_Modes\n{\npublic:\n\n\ttemplate \n\tstatic\n\tvoid normal_mode_analysis(\n\t\t\tPlaneData_Spectral &io_prog_h_pert, // h: surface height (perturbation)\n\t\t\tPlaneData_Spectral &io_prog_u, // u: velocity in x-direction\n\t\t\tPlaneData_Spectral &io_prog_v, // v: velocity in y-direction\n\t\t\tint number_of_prognostic_variables,\n\t\t\tSimulationVariables &i_simVars, // Simulation variables\n\t\t\tTCallbackClass *i_class,\n\t\t\tvoid(TCallbackClass::* const i_run_timestep_method)(void)\n\t)\n\t{\n\n\t\tconst PlaneDataConfig *planeDataConfig = io_prog_h_pert.planeDataConfig;\n\n\t\t// dummy time step to get time step size\n\t\tif (i_simVars.timecontrol.current_timestep_size <= 0)\n\t\t\tSWEETError(\"Normal mode analysis requires setting fixed time step size\");\n\n\t\t/*\n\t\t *\n\t\t * Mode-wise normal mode analysis\n\t\t *\n\t\t *\n\t\t */\n\n\t\tif (i_simVars.misc.normal_mode_analysis_generation == 4)\n\t\t{\n#if SWEET_EIGEN\n#if SWEET_USE_PLANE_SPECTRAL_DEALIASING\n\t\t\tSWEETError(\"SWE_Plane_Normal_Modes: This test was build for linear or linearized models, so please compile without dealising --plane-spectral-dealiasing=disable.\");\n#endif\n\n\n\t\t\t/*\n\t\t\t * Setup all output files\n\t\t\t */\n\t\t\tconst char* filename; //general filename\n\t\t\tchar buffer_real[1024];\n\n\t\t\tif (i_simVars.iodata.output_file_name == \"\")\n\t\t\t\tfilename = 
\"output_%s_t%020.8f.csv\";\n\t\t\telse\n\t\t\t\tfilename = i_simVars.iodata.output_file_name.c_str();\n\n\t\t\tsprintf(buffer_real, filename, \"normal_modes_plane\", i_simVars.timecontrol.current_timestep_size*i_simVars.iodata.output_time_scale);\n\t\t\tstd::ofstream file(buffer_real, std::ios_base::trunc);\n\t\t\tstd::cout << \"Writing normal mode analysis to files of the form '\" << buffer_real << \"'\" << std::endl;\n\n\t\t\t//Positive inertia-gravity modes\n\t\t\tsprintf(buffer_real, filename, \"normal_modes_plane_igpos\", i_simVars.timecontrol.current_timestep_size*i_simVars.iodata.output_time_scale);\n\t\t\tstd::ofstream file_igpos(buffer_real, std::ios_base::trunc);\n\n\t\t\t//Negative inertia-gravity modes\n\t\t\tsprintf(buffer_real, filename, \"normal_modes_plane_igneg\", i_simVars.timecontrol.current_timestep_size*i_simVars.iodata.output_time_scale);\n\t\t\tstd::ofstream file_igneg(buffer_real, std::ios_base::trunc);\n\n\t\t\t//Geostrophic modes\n\t\t\tsprintf(buffer_real, filename, \"normal_modes_plane_geo\", i_simVars.timecontrol.current_timestep_size*i_simVars.iodata.output_time_scale);\n\t\t\tstd::ofstream file_geo(buffer_real, std::ios_base::trunc);\n\n\t\t\t//std::cout << \"WARNING: OUTPUT IS TRANSPOSED!\" << std::endl;\n\n\t\t\t// use very high precision\n\t\t\tfile << std::setprecision(20);\n\t\t\tfile_igpos << std::setprecision(20);\n\t\t\tfile_igneg << std::setprecision(20);\n\t\t\tfile_geo << std::setprecision(20);\n\n\t\t\tfile << \"# dt \" << i_simVars.timecontrol.current_timestep_size << std::endl;\n\t\t\tfile << \"# g \" << i_simVars.sim.gravitation << std::endl;\n\t\t\tfile << \"# h \" << i_simVars.sim.h0 << std::endl;\n\t\t\tfile << \"# r \" << i_simVars.sim.sphere_radius << std::endl;\n\t\t\tfile << \"# f \" << i_simVars.sim.plane_rotating_f0 << std::endl;\n\n#if SWEET_USE_PLANE_SPECTRAL_SPACE\n\t\t\tint specmodes = planeDataConfig->get_spectral_iteration_range_area(0)+planeDataConfig->get_spectral_iteration_range_area(1);\n\t\t\tfile 
<< \"# specnummodes \" << specmodes << std::endl;\n\t\t\tfile << \"# specrealresx \" << planeDataConfig->spectral_real_modes[0] << std::endl;\n\t\t\tfile << \"# specrealresy \" << planeDataConfig->spectral_real_modes[1] << std::endl;\n#endif\n\n\t\t\tfile << \"# physresx \" << planeDataConfig->physical_res[0] << std::endl;\n\t\t\tfile << \"# physresy \" << planeDataConfig->physical_res[1] << std::endl;\n\t\t\tfile << \"# normalmodegeneration \" << i_simVars.misc.normal_mode_analysis_generation << std::endl;\n\t\t\tfile << \"# antialiasing \";\n#if SWEET_USE_PLANE_SPECTRAL_DEALIASING\n\t\t\tfile << 1;\n#else\n\t\t\tfile << 0;\n#endif\n\t\t\tfile << std::endl;\n\n\t\t\tPlaneData_Spectral* prog[3] = {&io_prog_h_pert, &io_prog_u, &io_prog_v};\n\n\t\t\tint number_of_prognostic_variables = 3;\n\t\t\t//The basic state is with zero in all variables\n\t\t\t// The only non zero variable in the basic state is the total height\n\t\t\t// for which the constant is added within run_timestep()\n\t\t\tio_prog_h_pert.spectral_set_zero();\n\t\t\tio_prog_u.spectral_set_zero();\n\t\t\tio_prog_v.spectral_set_zero();\n\n\t\t\t//int num_timesteps = 1;\n\n\t\t\t// Timestep and perturbation\n\t\t\tdouble dt = i_simVars.timecontrol.current_timestep_size;\n\t\t\tdouble eps = dt;\n\n\t\t\t//Matrix representing discrete linear operator in spectral space\n\t\t\tEigen::MatrixXcf A(3,3) ;\n\t\t\t//Eigen solver\n\t\t\tEigen::ComplexEigenSolver ces;\n\t\t\t//Final eigenvalues\n\t\t\tstd::complex eval[3];\n\n\t\t\t//For each spectral mode\n\t\t\t//for (int r = 0; r < 2; r++) //only required to get the symmetric half of the spectrum\n\t\t\t//{\n\t\t\tint r = 0;\n\n\t\t\tfor (std::size_t i = planeDataConfig->spectral_data_iteration_ranges[r][0][0]; i < planeDataConfig->spectral_data_iteration_ranges[r][0][1]; i++)\n\t\t\t{\n\t\t\t\tstd::cout << \".\" << std::flush;\n\t\t\t\tfor (std::size_t j = planeDataConfig->spectral_data_iteration_ranges[r][1][0]; j < 
planeDataConfig->spectral_data_iteration_ranges[r][1][1]; j++)\n\t\t\t\t{\n\t\t\t\t\t//This is the mode to be analysed\n\t\t\t\t\t//std::cout << \"Mode (i,j)= (\" << i << \" , \" << j <<\")\"<< std::endl;\n\n\n\t\t\t\t\tfor (int outer_prog_id = 0; outer_prog_id < number_of_prognostic_variables; outer_prog_id++)\n\t\t\t\t\t{\n\n\t\t\t\t\t\t// reset time control\n\t\t\t\t\t\ti_simVars.timecontrol.current_timestep_nr = 0;\n\t\t\t\t\t\ti_simVars.timecontrol.current_simulation_time = 0;\n\n\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t\tprog[inner_prog_id]->spectral_set_zero();\n\n\t\t\t\t\t\t// activate mode via real coefficient\n\t\t\t\t\t\tprog[outer_prog_id]->spectral_set(j, i, 1.0);\n\t\t\t\t\t\t//Activate the symetric couterpart of the mode (only needed if j>0 )\n\t\t\t\t\t\tif (j > 0)\n\t\t\t\t\t\t\tprog[outer_prog_id]->spectral_set(planeDataConfig->spectral_data_size[1]-j, i, 1.0);\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * RUN timestep\n\t\t\t\t\t\t */\n\t\t\t\t\t\t////prog[outer_prog_id]->request_data_physical();\n\t\t\t\t\t\t(i_class->*i_run_timestep_method)();\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * compute\n\t\t\t\t\t\t * 1/dt * (U(t+1) - U(t))\n\t\t\t\t\t\t */\n\t\t\t\t\t\t///////prog[outer_prog_id]->request_data_spectral();\n\n\t\t\t\t\t\tstd::complex val = prog[outer_prog_id]->spectral_get(j, i);\n\t\t\t\t\t\tval = val - 1.0; //subtract U(0) from mode\n\t\t\t\t\t\tprog[outer_prog_id]->spectral_set(j, i, val);\n\n\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t\t(*prog[inner_prog_id]) /= eps;\n\n\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tA(inner_prog_id,outer_prog_id)=prog[inner_prog_id]->spectral_get(j, i);;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\t//std::cout << \"Lik matrix\" << std::endl;\n\t\t\t\t\t//std::cout << A << 
std::endl;\n\n\t\t\t\t\t//std::cout<<\"Normal modes\" << std::endl;\n\t\t\t\t\tces.compute(A);\n\t\t\t\t\tfor(int i=0; i<3; i++)\n\t\t\t\t\t{\n\t\t\t\t\t\teval[i]=ces.eigenvalues()[i];\n\t\t\t\t\t\t//std::cout << \"Eigenvalue \"<< i << \" : \" << eval[i].real() <<\" \"<f) - we will adopt coriolis f to test as if > zero, since the exact freq is sqrt(f^2+cK*K)\n\t\t\t\t\t * -negative inertia-gravity (imag<-f)\n\t\t\t\t\t * -negative inertia-gravity (imag aprox 0) - we will fit all other modes here\n\t\t\t\t\t */\n\t\t\t\t\tint count_igpos=0;\n\t\t\t\t\tint count_igneg=0;\n\t\t\t\t\tint count_geo=0;\n\t\t\t\t\tfor(int i=0; i<3; i++)\n\t\t\t\t\t{\n\t\t\t\t\t\tif(eval[i].imag() > 0.5 * i_simVars.sim.plane_rotating_f0)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t//std::cout<< \"IG pos mode: \" << eval[i].imag() << std::endl;\n\t\t\t\t\t\t\t//file_igpos << eval[i].imag();\n\t\t\t\t\t\t\tfile_igpos << eval[i].real()<< \"\\t\" << eval[i].imag();\n\t\t\t\t\t\t\tfile_igpos << \"\\t\";\n\t\t\t\t\t\t\tcount_igpos++;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif(eval[i].imag() < - 0.5 * i_simVars.sim.plane_rotating_f0)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t//std::cout<< \"IG neg mode: \" << eval[i].imag() << std::endl;\n\t\t\t\t\t\t\t//file_igneg << eval[i].imag();\n\t\t\t\t\t\t\tfile_igneg << eval[i].real()<< \"\\t\" << eval[i].imag();\n\t\t\t\t\t\t\tfile_igneg << \"\\t\";\n\t\t\t\t\t\t\tcount_igneg++;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif(eval[i].imag() >= - 0.5 * i_simVars.sim.plane_rotating_f0 && eval[i].imag() <= 0.5 * i_simVars.sim.plane_rotating_f0 )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t//std::cout<< \"IG geo mode: \" << eval[i].imag() << std::endl;\n\t\t\t\t\t\t\t//file_geo << eval[i].imag();\n\t\t\t\t\t\t\tfile_geo << eval[i].real()<< \"\\t\" << eval[i].imag();\n\t\t\t\t\t\t\tfile_geo << \"\\t\";\n\t\t\t\t\t\t\tcount_geo++;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t//Check if we got the correct modes\n\t\t\t\t\tif ( count_igpos * count_igneg * count_geo > 0 
)\n\t\t\t\t\t{\n\t\t\t\t\t\tcount_igpos=0;\n\t\t\t\t\t\tcount_igneg=0;\n\t\t\t\t\t\tcount_geo=0;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tSWEETError(\"SWE_Plane_Normal_Modes: Could not separate modes!!\");\n\t\t\t\t\t}\n\n\t\t\t\t\t//std::cout<<\"-------------------------\" << std::endl;\n\t\t\t\t}\n\t\t\t\tfile_igpos << std::endl;\n\t\t\t\tfile_igneg << std::endl;\n\t\t\t\tfile_geo << std::endl;\n\t\t\t}\n\n\t\t\t//}\n\t\t\t//std::cout<<\"-------------------------\" << std::endl;\n\t\t\t//SWEETError(\"still needs work...\");\n#else\n\t\t\tSWEETError(\"SWE_Plane_Normal_Modes: Cannot test this without Eigen library. Please compile with --eigen=enable\");\n#endif\n\t\t}\n\t\t/*\n\t\t * Do a normal mode analysis using perturbation, see\n\t\t * Hillary Weller, John Thuburn, Collin J. Cotter,\n\t\t * \"Computational Modes and Grid Imprinting on Five Quasi-Uniform Spherical C Grids\"\n\t\t */\n\t\telse\n\t\t{\n\n\t\t\t//run_timestep();\n\t\t\tconst char* filename;\n\t\t\tchar buffer_real[1024];\n\n\t\t\tif (i_simVars.iodata.output_file_name == \"\")\n\t\t\t\tfilename = \"output_%s_normalmodes.csv\";\n\t\t\telse\n\t\t\t\tfilename = i_simVars.iodata.output_file_name.c_str();\n\n\n\t\t\tsprintf(buffer_real, filename, \"normal_modes_physical\", i_simVars.timecontrol.current_timestep_size*i_simVars.iodata.output_time_scale);\n\t\t\tstd::ofstream file(buffer_real, std::ios_base::trunc);\n\t\t\tstd::cout << \"Writing normal mode analysis to file '\" << buffer_real << \"'\" << std::endl;\n\n\t\t\tstd::cout << \"WARNING: OUTPUT IS TRANSPOSED!\" << std::endl;\n\n\t\t\t// use very high precision\n\t\t\tfile << std::setprecision(20);\n\n\t\t\tPlaneData_Spectral* prog[3] = {&io_prog_h_pert, &io_prog_u, &io_prog_v};\n\n\t\t\t/*\n\t\t\t * Maximum number of prognostic variables\n\t\t\t *\n\t\t\t * Advection e.g. 
has only one\n\t\t\t */\n\t\t\tif (number_of_prognostic_variables <= 0)\n\t\t\t\tSWEETError(\"simVars.pde.number_of_prognostic_variables must be set!\");\n\n\t\t\tif (number_of_prognostic_variables == 3)\n\t\t\t{\n\t\t\t\tio_prog_h_pert.spectral_set_zero();\n\t\t\t\tio_prog_u.spectral_set_zero();\n\t\t\t\tio_prog_v.spectral_set_zero();\n\t\t\t}\n\t\t\telse if (number_of_prognostic_variables == 1)\n\t\t\t{\n\t\t\t\tio_prog_h_pert.spectral_set_zero();\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tSWEETError(\"Not yet supported\");\n\t\t\t}\n\n#if 0\n\t\t\tif (i_simVars.disc.timestepping_method == SimulationVariables::Discretization::LEAPFROG_EXPLICIT)\n\t\t\t{\n\t\t\t\tSWEETError(\"Not yet tested and supported\");\n\t\t\t\tstd::cout << \"WARNING: Leapfrog time stepping doesn't make real sense since 1st step is based on RK-like method\" << std::endl;\n\t\t\t\tstd::cout << \"We'll do two Leapfrog time steps here to take the LF errors into account!\" << std::endl;\n\t\t\t\tstd::cout << \"Therefore, we also halve the time step size here\" << std::endl;\n\n\t\t\t\ti_simVars.timecontrol.current_timestep_size = 0.5*i_simVars.sim.CFL;\n\t\t\t\ti_simVars.sim.CFL = -i_simVars.timecontrol.current_timestep_size;\n\t\t\t}\n#endif\n\n\t\t\tint num_timesteps = 1;\n\t\t\tif (i_simVars.misc.normal_mode_analysis_generation >= 10)\n\t\t\t{\n\t\t\t\tif (i_simVars.timecontrol.max_timesteps_nr > 0)\n\t\t\t\t\tnum_timesteps = i_simVars.timecontrol.max_timesteps_nr;\n\t\t\t}\n\n\t\t\tif (i_simVars.timecontrol.max_simulation_time > 0)\n\t\t\t\tfile << \"# t \" << i_simVars.timecontrol.max_simulation_time << std::endl;\n\t\t\telse\n\t\t\t\tfile << \"# t \" << (num_timesteps*(-i_simVars.timecontrol.current_timestep_size)) << std::endl;\n\n\t\t\tfile << \"# g \" << i_simVars.sim.gravitation << std::endl;\n\t\t\tfile << \"# h \" << i_simVars.sim.h0 << std::endl;\n//\t\t\tfile << \"# r \" << i_simVars.sim.sphere_radius << std::endl;\n\t\t\tfile << \"# f \" << i_simVars.sim.plane_rotating_f0 << 
std::endl;\n\n#if SWEET_USE_PLANE_SPECTRAL_SPACE\n\t\t\tint specmodes = planeDataConfig->get_spectral_iteration_range_area(0)+planeDataConfig->get_spectral_iteration_range_area(1);\n\t\t\tfile << \"# specnummodes \" << specmodes << std::endl;\n\t\t\tfile << \"# specrealresx \" << planeDataConfig->spectral_real_modes[0] << std::endl;\n\t\t\tfile << \"# specrealresy \" << planeDataConfig->spectral_real_modes[1] << std::endl;\n#endif\n\n\t\t\tfile << \"# physresx \" << planeDataConfig->physical_res[0] << std::endl;\n\t\t\tfile << \"# physresy \" << planeDataConfig->physical_res[1] << std::endl;\n\t\t\tfile << \"# normalmodegeneration \" << i_simVars.misc.normal_mode_analysis_generation << std::endl;\n\t\t\tfile << \"# antialiasing \";\n\n#if SWEET_USE_PLANE_SPECTRAL_DEALIASING\n\t\t\tfile << 1;\n#else\n\t\t\tfile << 0;\n#endif\n\n\t\t\tfile << std::endl;\n\n\n\t\t\t// iterate over all prognostic variables\n\t\t\tfor (int outer_prog_id = 0; outer_prog_id < number_of_prognostic_variables; outer_prog_id++)\n\t\t\t{\n\t\t\t\tif (i_simVars.misc.normal_mode_analysis_generation == 1 || i_simVars.misc.normal_mode_analysis_generation == 11)\n\t\t\t\t{\n\t\t\t\t\t// iterate over physical space\n\t\t\t\t\tfor (std::size_t outer_i = 0; outer_i < planeDataConfig->physical_array_data_number_of_elements; outer_i++)\n\t\t\t\t\t{\n\t\t\t\t\t\t// reset time control\n\t\t\t\t\t\ti_simVars.timecontrol.current_timestep_nr = 0;\n\t\t\t\t\t\ti_simVars.timecontrol.current_simulation_time = 0;\n\n\t\t\t\t\t\tstd::cout << \".\" << std::flush;\n\n\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t\tprog[inner_prog_id]->spectral_set_zero();\n\n\t\t\t\t\t\t// activate mode\n\t\t\t\t\t\t///prog[outer_prog_id]->request_data_physical();\n\t\t\t\t\t\t///prog[outer_prog_id]->physical_space_data[outer_i] = 1;\n\t\t\t\t\t\tPlaneData_Physical tmp = prog[outer_prog_id]->toPhys();\n\t\t\t\t\t\ttmp.physical_space_data[outer_i] = 
1;\n\t\t\t\t\t\tprog[outer_prog_id]->loadPlaneDataPhysical(tmp);\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * RUN timestep\n\t\t\t\t\t\t */\n\n\t\t\t\t\t\t(i_class->*i_run_timestep_method)();\n\n\t\t\t\t\t\tif (i_simVars.misc.normal_mode_analysis_generation == 1)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t/*\n\t\t\t\t\t\t\t * compute\n\t\t\t\t\t\t\t * 1/dt * (U(t+1) - U(t))\n\t\t\t\t\t\t\t */\n\t\t\t\t\t\t\t///////prog[outer_prog_id]->request_data_physical();\n\t\t\t\t\t\t\t///////prog[outer_prog_id]->physical_space_data[outer_i] -= 1.0;\n\t\t\t\t\t\t\tPlaneData_Physical tmp2 = prog[outer_prog_id]->toPhys();\n\t\t\t\t\t\t\ttmp2.physical_space_data[outer_i] -= 1.0;\n\n\t\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t\t\ttmp2 /= i_simVars.timecontrol.current_timestep_size;\n\t\t\t\t\t\t\t\t//(*prog[inner_prog_id]) /= i_simVars.timecontrol.current_timestep_size;\n\n\t\t\t\t\t\t\tprog[outer_prog_id]->loadPlaneDataPhysical(tmp2);\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttmp = prog[outer_prog_id]->toPhys();\n\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t///prog[inner_prog_id]->request_data_physical();\n\t\t\t\t\t\t\tfor (std::size_t k = 0; k < planeDataConfig->physical_array_data_number_of_elements; k++)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t///file << prog[inner_prog_id]->physical_space_data[k];\n\t\t\t\t\t\t\t\tfile << tmp.physical_space_data[k];\n\t\t\t\t\t\t\t\tif (inner_prog_id != number_of_prognostic_variables-1 || k != planeDataConfig->physical_array_data_number_of_elements-1)\n\t\t\t\t\t\t\t\t\tfile << \"\\t\";\n\t\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t\t\tfile << std::endl;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n#if 1\n\t\t\t\telse if (i_simVars.misc.normal_mode_analysis_generation == 3 || i_simVars.misc.normal_mode_analysis_generation == 13)\n\t\t\t\t{\n#if !SWEET_USE_PLANE_SPECTRAL_SPACE\n\t\t\t\t\tSWEETError(\"Only available 
with if plane spectral space is activated during compile time!\");\n#else\n\n\t\t\t\t\t// iterate over spectral space\n\t\t\t\t\tfor (int r = 0; r < 2; r++)\n\t\t\t\t\t{\n\n\t\t\t\t\t\tfor (std::size_t j = planeDataConfig->spectral_data_iteration_ranges[r][1][0]; j < planeDataConfig->spectral_data_iteration_ranges[r][1][1]; j++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfor (std::size_t i = planeDataConfig->spectral_data_iteration_ranges[r][0][0]; i < planeDataConfig->spectral_data_iteration_ranges[r][0][1]; i++)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t// reset time control\n\t\t\t\t\t\t\t\ti_simVars.timecontrol.current_timestep_nr = 0;\n\t\t\t\t\t\t\t\ti_simVars.timecontrol.current_simulation_time = 0;\n\n\t\t\t\t\t\t\t\tstd::cout << \".\" << std::flush;\n\n\t\t\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t\t\t\tprog[inner_prog_id]->spectral_set_zero();\n\n\t\t\t\t\t\t\t\t// activate mode via real coefficient\n\t\t\t\t\t\t\t\tprog[outer_prog_id]->spectral_set(j, i, 1.0);\n\n\t\t\t\t\t\t\t\t/*\n\t\t\t\t\t\t\t\t * RUN timestep\n\t\t\t\t\t\t\t\t */\n\t\t\t\t\t\t\t\t(i_class->*i_run_timestep_method)();\n\n\n\t\t\t\t\t\t\t\tif (i_simVars.misc.normal_mode_analysis_generation == 3)\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t/*\n\t\t\t\t\t\t\t\t\t * compute\n\t\t\t\t\t\t\t\t\t * 1/dt * (U(t+1) - U(t))\n\t\t\t\t\t\t\t\t\t */\n\t\t\t\t\t\t\t\t\t///prog[outer_prog_id]->request_data_spectral();\n\n\t\t\t\t\t\t\t\t\tstd::complex val = prog[outer_prog_id]->spectral_get(j, i);\n\t\t\t\t\t\t\t\t\tval = val - 1.0;\n\t\t\t\t\t\t\t\t\tprog[outer_prog_id]->spectral_set(j, i, val);\n\n\t\t\t\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t\t\t\t\t(*prog[inner_prog_id]) /= i_simVars.timecontrol.current_timestep_size;\n\t\t\t\t\t\t\t\t}\n\n\n\t\t\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; 
inner_prog_id++)\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t///prog[inner_prog_id]->request_data_spectral();\n\n\t\t\t\t\t\t\t\t\t/*\n\t\t\t\t\t\t\t\t\t * REAL\n\t\t\t\t\t\t\t\t\t */\n\n\t\t\t\t\t\t\t\t\tfor (int r = 0; r < 2; r++)\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tfor (std::size_t j = planeDataConfig->spectral_data_iteration_ranges[r][1][0]; j < planeDataConfig->spectral_data_iteration_ranges[r][1][1]; j++)\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tfor (std::size_t i = planeDataConfig->spectral_data_iteration_ranges[r][0][0]; i < planeDataConfig->spectral_data_iteration_ranges[r][0][1]; i++)\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tfile << prog[inner_prog_id]->spectral_get(j, i).real();\n\t\t\t\t\t\t\t\t\t\t\t\tfile << \"\\t\";\n\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\n\n\t\t\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t/*\n\t\t\t\t\t\t\t\t\t * IMAG\n\t\t\t\t\t\t\t\t\t */\n\t\t\t\t\t\t\t\t\tint c = 0;\n\t\t\t\t\t\t\t\t\tfor (int r = 0; r < 2; r++)\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tfor (std::size_t j = planeDataConfig->spectral_data_iteration_ranges[r][1][0]; j < planeDataConfig->spectral_data_iteration_ranges[r][1][1]; j++)\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tfor (std::size_t i = planeDataConfig->spectral_data_iteration_ranges[r][0][0]; i < planeDataConfig->spectral_data_iteration_ranges[r][0][1]; i++)\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tfile << prog[inner_prog_id]->spectral_get(j, i).imag();\n\n\t\t\t\t\t\t\t\t\t\t\t\tif (inner_prog_id != number_of_prognostic_variables-1 || c != specmodes-1)\n\t\t\t\t\t\t\t\t\t\t\t\t\tfile << \"\\t\";\n\t\t\t\t\t\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t\t\t\t\t\t\tfile << 
std::endl;\n\n\t\t\t\t\t\t\t\t\t\t\t\tc++;\n\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n#endif\n\t\t\t\t}\n#else\n\t\t\t\telse if (i_simVars.misc.normal_mode_analysis_generation == 3 || i_simVars.misc.normal_mode_analysis_generation == 13)\n\t\t\t\t{\n\t\t\t\t\tPlaneData_SpectralComplex t1(planeDataConfig);\n\t\t\t\t\tPlaneData_SpectralComplex t2(planeDataConfig);\n\t\t\t\t\tPlaneData_SpectralComplex t3(planeDataConfig);\n\t\t\t\t\tPlaneDataComplex* prog_cplx[3] = {&t1, &t2, &t3};\n\n\t\t\t\t\t// iterate over spectral space\n\t\t\t\t\tfor (std::size_t outer_i = 0; outer_i < planeDataConfig->spectral_complex_array_data_number_of_elements; outer_i++)\n\t\t\t\t\t{\n\t\t\t\t\t\t// reset time control\n\t\t\t\t\t\ti_simVars.timecontrol.current_timestep_nr = 0;\n\t\t\t\t\t\ti_simVars.timecontrol.current_simulation_time = 0;\n\n\t\t\t\t\t\tstd::cout << \".\" << std::flush;\n\n\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t\tprog_cplx[inner_prog_id]->spectral_set_zero();\n\n\t\t\t\t\t\t// activate mode via real coefficient\n\t\t\t\t\t\tprog_cplx[outer_prog_id]->request_data_spectral();\n\t\t\t\t\t\tprog_cplx[outer_prog_id]->spectral_space_data[outer_i].real(1);\n\n\t\t\t\t\t\t// convert PlaneData_SpectralComplex to PlaneData\n\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t*prog[inner_prog_id] = Convert_PlaneDataSpectralComplex_To_PlaneDataSpectral::physical_convert(*prog_cplx[inner_prog_id]);\n\t\t\t\t\t\t\tprog[inner_prog_id]->spectral_zeroAliasingModes();\n\t\t\t\t\t\t}\n\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * RUN timestep\n\t\t\t\t\t\t */\n\t\t\t\t\t\t(i_class->*i_run_timestep_method)();\n\n\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; 
inner_prog_id++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tprog[inner_prog_id]->spectral_zeroAliasingModes();\n#warning \"update this physical_convert maybe to spectral_convert\"\n\n\t\t\t\t\t\t\t*prog_cplx[inner_prog_id] = Convert_PlaneDataSpectral_To_PlaneDataSpectralComplex::physical_convert(*prog[inner_prog_id]);\n\n\t\t\t\t\t\t\tprog_cplx[inner_prog_id]->request_data_spectral();\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (i_simVars.misc.normal_mode_analysis_generation == 3)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t/*\n\t\t\t\t\t\t\t * compute\n\t\t\t\t\t\t\t * 1/dt * (U(t+1) - U(t))\n\t\t\t\t\t\t\t */\n\t\t\t\t\t\t\tprog_cplx[outer_prog_id]->request_data_spectral();\n\t\t\t\t\t\t\tprog_cplx[outer_prog_id]->spectral_space_data[outer_i] -= 1.0;\n\n\t\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t\t\tprog_cplx[inner_prog_id]->operator*=(1.0/i_simVars.timecontrol.current_timestep_size);\n\t\t\t\t\t\t}\n\n\n\t\t\t\t\t\t// convert PlaneData_SpectralComplex to PlaneData\n\t\t\t\t\t\tfor (int inner_prog_id = 0; inner_prog_id < number_of_prognostic_variables; inner_prog_id++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tprog_cplx[inner_prog_id]->request_data_spectral();\n\n\t\t\t\t\t\t\t/*\n\t\t\t\t\t\t\t * REAL\n\t\t\t\t\t\t\t */\n\t\t\t\t\t\t\tfor (std::size_t k = 0; k < planeDataConfig->spectral_complex_array_data_number_of_elements; k++)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tfile << prog_cplx[inner_prog_id]->spectral_space_data[k].real();\n\t\t\t\t\t\t\t\tfile << \"\\t\";\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t/*\n\t\t\t\t\t\t\t * IMAG\n\t\t\t\t\t\t\t */\n\t\t\t\t\t\t\tfor (std::size_t k = 0; k < planeDataConfig->spectral_complex_array_data_number_of_elements; k++)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tfile << prog_cplx[inner_prog_id]->spectral_space_data[k].imag();\n\n\t\t\t\t\t\t\t\tif (inner_prog_id != number_of_prognostic_variables-1 || k != planeDataConfig->spectral_complex_array_data_number_of_elements-1)\n\t\t\t\t\t\t\t\t\tfile << 
\"\\t\";\n\t\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t\t\tfile << std::endl;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n#endif\n\t\t\t}\n\t\t}\n\t}\n\n\n\t~SWE_Plane_Normal_Modes()\n\t{\n\n\t}\n};\n\n#endif /* SRC_PROGRAMS_SWE_PLANE_NORMAL_MODES_HPP_ */\n", "meta": {"hexsha": "00e8ca51c1637e5bf29df4c8f6dbb970bc26ce3e", "size": 22579, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/programs/swe_plane_timeintegrators/SWE_Plane_Normal_Modes.hpp", "max_stars_repo_name": "schreibm/sweet", "max_stars_repo_head_hexsha": "a1b97e5862c3871177dff877dff825fd4b98a085", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/programs/swe_plane_timeintegrators/SWE_Plane_Normal_Modes.hpp", "max_issues_repo_name": "schreibm/sweet", "max_issues_repo_head_hexsha": "a1b97e5862c3871177dff877dff825fd4b98a085", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/programs/swe_plane_timeintegrators/SWE_Plane_Normal_Modes.hpp", "max_forks_repo_name": "schreibm/sweet", "max_forks_repo_head_hexsha": "a1b97e5862c3871177dff877dff825fd4b98a085", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9520123839, "max_line_length": 167, "alphanum_fraction": 0.6523318127, "num_tokens": 6399, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.779992900254107, "lm_q2_score": 0.640635854839898, "lm_q1q2_score": 0.49969141842334114}} {"text": "#include \n#include \n#include \n#include \n#include \n#include // std::numeric_limits\n#include \n#include \n#include \n\nusing namespace boost ;\n\nconst int CRC16_CCITT_R=0x8408; //https://en.wikipedia.org/wiki/Cyclic_redundancy_check\nconst int DEFAULT_SIZE_OF_PACK=1;\nconst int DEFAULT_PACK_COUNT=10;\n\nunsigned int packSize = DEFAULT_SIZE_OF_PACK;\nunsigned int packCount = DEFAULT_PACK_COUNT;\n\nstd::vector data;\nstd::vector remote;\n\nstd::string sOutputFile = \"file.binary\";\nstd::string sInpuFile = \"\";\n\nunsigned int sawSize = std::numeric_limits::max();\nunsigned int rndSize = 0;\nunsigned int mulSize = 1;\nunsigned int crcPoly = CRC16_CCITT_R;\n\nint main(int argc, char* argv[])\n{\n\n namespace po = boost::program_options;\n po::options_description desc(\"Allowed options\");\n desc.add_options()\n (\"help,h\", \"show options\")\n (\"addcrc,c\", po::value (&packSize), \"count crc after this\")\n (\"addsum,u\", po::value (&packSize), \"count simple sum after this\")\n (\"datacount,d\", po::value (&packCount), \"count of genrated packs\")\n (\"outfile,f\", po::value (&sOutputFile), \"outputfilename, default:file.binary\")\n (\"inputfile,k\", po::value (&sInpuFile), \"get data from inputfile instead of algo\" )\n (\"saw,s\", po::value (&sawSize), \"algorithm: saw modulo (default:maxint)\")\n (\"rnd,r\", po::value (&rndSize), \"algorithm: random modulo (default:0)\")\n (\"mul,m\", po::value (&mulSize), \"algorithm: multiplication (default:1)\")\n (\"crcpoly,e\", po::value (&crcPoly), \"crc polynominal (default:0x8408)\")\n (\"print,p\", \"show data\")\n ;\n po::variables_map vm;\n po::store(po::command_line_parser(argc, argv).\n options(desc).run(), vm);\n po::notify(vm);\n\n if (vm.count(\"help\"))\n {\n std::cerr << argv[0] << \" - data generator\" << std::endl;\n std::cerr << \"The idea of this program is to generate data with or 
without crc.\"<< std::endl;\n std::cerr << \"For testing purposes.\"<< std::endl;\n std::cerr << \"Algorithm: ((rnd + (j*CRC_packSize + i)) * mul) % saw\" << std::endl;\n std::cerr << \"Examples:\"<< std::endl;\n std::cerr << \"\\t./xgenerator -d 8 -s 30 -p -m 10:\"<< std::endl;\n std::cerr << \"\\t\\t>0 10 20 0 10 20 0 10\"<< std::endl;\n std::cerr << \"\\t./xgenerator -d 8 -p\"<< std::endl;\n std::cerr << \"\\t\\t>0 1 2 3 4 5 6 7\"<< std::endl;\n std::cerr << \"\\t./xgenerator -d 8 -p -r 20\"<< std::endl;\n std::cerr << \"\\t\\t>3 7 19 18 17 20 12 19\"<< std::endl;\n std::cerr << \"\\t./xgenerator -d 2 -p -c 4\"<< std::endl;\n std::cerr << \"\\t\\t0 1 2 3 37368\"<< std::endl;\n std::cerr << \"\\t\\t4 5 6 7 24536\"<< std::endl;\n std::cerr << desc << std::endl ;\n return system::errc::success;\n }\n\n auto myfile = std::fstream(sOutputFile, std::ios::out | std::ios::binary);\n\n if (sInpuFile != \"\" && vm.count(\"addcrc\"))\n {\n\n auto remotefile = std::fstream(sInpuFile, std::ios::in | std::ios::binary);\n unsigned int val ;\n while (remotefile.read((char*)&val,sizeof(unsigned int)))\n {\n remote.push_back(val);\n }\n remotefile.close();\n\n assert( remote.size() >= packCount * packSize);\n }\n\n int crcqCnt=0; //Count of crc that landend in output file\n int sumqCnt=0; //Count of crc that landend in output file\n int cnt=0; //Count of data read from remote file\n for(auto j = 0; j < packCount; j++)\n {\n\n std::vector crcq;\n unsigned int sumq = 0;\n\n for(auto i = 0; i < packSize; ++i)\n {\n\n auto rndVector = 0;\n unsigned int val ;\n if (sInpuFile != \"\" && vm.count(\"addcrc\"))\n {\n val = remote[cnt++];\n }\n else\n {\n if (rndSize>0)\n {\n rndVector = rand()%rndSize;\n }\n val = ((rndVector + (j*packSize + i)) * mulSize)%sawSize ;\n }\n data.push_back(val);\n if (vm.count(\"addcrc\"))\n {\n crcq.push_back(val);\n }\n if (vm.count(\"print\"))\n {\n std::cout << val << \" \";\n }\n\n if (vm.count(\"addsum\"))\n {\n sumq+=val;\n }\n }\n\n if 
(vm.count(\"addcrc\"))\n {\n\n boost::crc_basic<16> crcfn(crcPoly, 0x0, 0x0, false, false);\n crcfn.process_bytes(crcq.data(), sizeof(unsigned int) * crcq.size());\n if (vm.count(\"print\"))\n {\n std::cout << crcfn.checksum() << std::endl ;\n }\n data.push_back(crcfn.checksum());\n crcqCnt++;\n }\n\n if (vm.count(\"addsum\"))\n {\n if (vm.count(\"print\"))\n {\n std::cout << sumq << std::endl ;\n }\n data.push_back(sumq);\n sumqCnt++;\n }\n }\n\n if (vm.count(\"print\"))\n {\n std::cout << std::endl ;\n }\n\n myfile.write((char*)data.data(), data.size()*sizeof(unsigned int));\n myfile.close();\n\n std::cout << \"count:\"<< packCount * packSize << std::endl;\n if (vm.count(\"addcrc\"))\n {\n std::cout << \"crc cnt:\"<< crcqCnt << std::endl;\n }\n if (vm.count(\"addsum\"))\n {\n std::cout << \"sum cnt:\"<< sumqCnt << std::endl;\n }\n std::cout << \"output:\" << sOutputFile << std::endl;\n std::cout << \"done.\" << std::endl;\n return system::errc::success;\n}\n", "meta": {"hexsha": "3ac7396feab0e8ba9835cbd5d1d94ae8947196de", "size": 5749, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "examples/generator/generator.cpp", "max_stars_repo_name": "michalwidera/abracadabradb", "max_stars_repo_head_hexsha": "13d4f66454b3b6af7e8353bd10186409230634e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-12-04T16:51:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-09T15:13:13.000Z", "max_issues_repo_path": "examples/generator/generator.cpp", "max_issues_repo_name": "michalwidera/abracadabradb", "max_issues_repo_head_hexsha": "13d4f66454b3b6af7e8353bd10186409230634e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9.0, "max_issues_repo_issues_event_min_datetime": "2019-12-07T21:21:41.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-17T16:44:36.000Z", "max_forks_repo_path": "examples/generator/generator.cpp", "max_forks_repo_name": "michalwidera/abracadabradb", 
"max_forks_repo_head_hexsha": "13d4f66454b3b6af7e8353bd10186409230634e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4802259887, "max_line_length": 101, "alphanum_fraction": 0.5371368934, "num_tokens": 1580, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.779992900254107, "lm_q2_score": 0.640635854839898, "lm_q1q2_score": 0.49969141842334114}} {"text": "// File: morton_dense.cpp\n\n#include \n#include \n\nint main(int, char**)\n{\n using namespace mtl;\n\n // Z-order matrix\n morton_dense A(10, 10);\n\n A= 0;\n A(2, 3)= 7.0;\n A[2][4]= 3.0;\n std::cout << \"A is \\n\" << A << \"\\n\";\n \n // B is an N-order matrix with column-major 4x4 blocks, see paper\n morton_dense B(10, 10);\n\n // Assign the identity matrix times 3 to B\n B= 3;\n std::cout << \"B is \\n\" << B << \"\\n\";\n\n return 0;\n}\n\n", "meta": {"hexsha": "3dfbd4b581e8446b9cd643c41c5fbfea14f7fcdc", "size": 566, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/numeric/mtl/examples/morton_dense.cpp", "max_stars_repo_name": "lit-uriy/mtl4-mirror", "max_stars_repo_head_hexsha": "37cf7c2847165d3537cbc3400cb5fde6f80e3d8b", "max_stars_repo_licenses": ["MTLL"], "max_stars_count": 24.0, "max_stars_repo_stars_event_min_datetime": "2019-03-26T15:25:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T10:00:45.000Z", "max_issues_repo_path": "libs/numeric/mtl/examples/morton_dense.cpp", "max_issues_repo_name": "lit-uriy/mtl4-mirror", "max_issues_repo_head_hexsha": "37cf7c2847165d3537cbc3400cb5fde6f80e3d8b", "max_issues_repo_licenses": ["MTLL"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2020-04-17T12:35:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-03T15:46:25.000Z", "max_forks_repo_path": "libs/numeric/mtl/examples/morton_dense.cpp", "max_forks_repo_name": "lit-uriy/mtl4-mirror", 
"max_forks_repo_head_hexsha": "37cf7c2847165d3537cbc3400cb5fde6f80e3d8b", "max_forks_repo_licenses": ["MTLL"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2019-12-01T13:40:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T08:39:54.000Z", "avg_line_length": 20.2142857143, "max_line_length": 69, "alphanum_fraction": 0.5777385159, "num_tokens": 199, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7799929002541068, "lm_q2_score": 0.640635854839898, "lm_q1q2_score": 0.4996914184233411}} {"text": "#include \"refill/system_models/linearized_system_model.h\"\n\n#include \n\n#include \n\n#include \"refill/distributions/gaussian_distribution.h\"\n\nnamespace refill {\n\nclass LinearizedSystemModelClass : public LinearizedSystemModel {\n public:\n LinearizedSystemModelClass(const size_t& state_dim,\n const DistributionInterface& system_noise)\n : LinearizedSystemModel(state_dim, system_noise) {}\n LinearizedSystemModelClass(const size_t& state_dim,\n const DistributionInterface& system_noise,\n const size_t& input_dim)\n : LinearizedSystemModel(state_dim, system_noise, input_dim) {}\n Eigen::VectorXd propagate(const Eigen::VectorXd& state,\n const Eigen::VectorXd& input,\n const Eigen::VectorXd& noise) const {\n return state + noise;\n }\n};\n\nTEST(LinearizedSystemModelTest, NoInputTest) {\n GaussianDistribution system_noise(Eigen::Vector2d::Zero(),\n Eigen::Matrix2d::Identity());\n\n LinearizedSystemModelClass system_model(2, system_noise);\n\n Eigen::MatrixXd state_jacobian = system_model.getStateJacobian(\n Eigen::Vector2d::Zero(), Eigen::VectorXd::Zero(0));\n\n ASSERT_EQ(state_jacobian.rows(), state_jacobian.cols());\n ASSERT_EQ(system_model.getStateDim(), state_jacobian.rows());\n ASSERT_EQ(Eigen::Matrix2d::Identity(), state_jacobian);\n\n Eigen::MatrixXd noise_jacobian = system_model.getNoiseJacobian(\n Eigen::Vector2d::Zero(), Eigen::VectorXd::Zero(0));\n\n ASSERT_EQ(system_model.getStateDim(), 
noise_jacobian.rows());\n ASSERT_EQ(system_model.getNoiseDim(), noise_jacobian.cols());\n ASSERT_EQ(Eigen::Matrix2d::Identity(), noise_jacobian);\n}\n\nTEST(LinearizedSystemModelTest, WithInputTest) {\n GaussianDistribution system_noise(Eigen::Vector2d::Zero(),\n Eigen::Matrix2d::Identity());\n\n LinearizedSystemModelClass system_model(2, system_noise, 2);\n\n Eigen::MatrixXd state_jacobian = system_model.getStateJacobian(\n Eigen::Vector2d::Zero(), Eigen::Vector2d::Zero());\n\n ASSERT_EQ(state_jacobian.rows(), state_jacobian.cols());\n ASSERT_EQ(system_model.getStateDim(), state_jacobian.rows());\n ASSERT_EQ(Eigen::Matrix2d::Identity(), state_jacobian);\n\n Eigen::MatrixXd noise_jacobian = system_model.getNoiseJacobian(\n Eigen::Vector2d::Zero(), Eigen::Vector2d::Zero());\n\n ASSERT_EQ(system_model.getStateDim(), noise_jacobian.rows());\n ASSERT_EQ(system_model.getNoiseDim(), noise_jacobian.cols());\n ASSERT_EQ(Eigen::Matrix2d::Identity(), noise_jacobian);\n}\n\n} // namespace refill\n", "meta": {"hexsha": "5b071fc4923e75cdfcb5b8ababcb4de440cc5490", "size": 2650, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/tests/linearized_system_model_test.cc", "max_stars_repo_name": "jwidauer/refill", "max_stars_repo_head_hexsha": "64947e0a8e15855f4a5ad048f09f8d38715bbe91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-06-13T07:28:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-08T11:26:34.000Z", "max_issues_repo_path": "src/tests/linearized_system_model_test.cc", "max_issues_repo_name": "jwidauer/refill", "max_issues_repo_head_hexsha": "64947e0a8e15855f4a5ad048f09f8d38715bbe91", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tests/linearized_system_model_test.cc", "max_forks_repo_name": "jwidauer/refill", "max_forks_repo_head_hexsha": 
"64947e0a8e15855f4a5ad048f09f8d38715bbe91", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2021-06-01T13:21:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-01T20:33:20.000Z", "avg_line_length": 37.8571428571, "max_line_length": 71, "alphanum_fraction": 0.7052830189, "num_tokens": 579, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7799929002541067, "lm_q2_score": 0.640635854839898, "lm_q1q2_score": 0.49969141842334097}} {"text": "#include \n#include \n#include \n#include \n#include \"elevation.hxx\"\n#include \"ObjWriter.hxx\"\n#include \"Delaunay.h\"\n\nusing namespace std;\nusing namespace osmwave;\n\nstatic projPJ wgs84 = pj_init_plus(\"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\");\n\n// Note: in place addition\nstatic void vecAdd(XYZ& a, const XYZ& b) {\n a.x += b.x;\n a.y += b.y;\n a.z += b.z;\n}\n\nstatic void vecSub(const XYZ& a, const XYZ& b, XYZ& result) {\n result.x = a.x - b.x;\n result.y = a.y - b.y;\n result.z = a.z - b.z;\n}\n\nstatic void triangleNormal(const XYZ* coords, const ITRIANGLE& tri, XYZ& normal) {\n XYZ u;\n XYZ v;\n\n vecSub(coords[tri.p2], coords[tri.p1], u);\n vecSub(coords[tri.p3], coords[tri.p1], v);\n\n normal.x = u.y*v.z - u.z*v.y;\n normal.y = u.z*v.x - u.x*v.z;\n normal.z = u.x*v.y - u.y*v.x;\n\n double l = sqrt(normal.x*normal.x+normal.y*normal.y+normal.z*normal.z);\n\n normal.x = normal.x / l;\n normal.y = normal.y / l;\n normal.z = normal.z / l;\n}\n\nstatic double findNearHeight(int rows, int cols, int index, int dx, int dy, XYZ* verts) {\n int s = index;\n do {\n s += dx * rows + dy;\n } while (std::isnan(verts[s].z));\n\n return verts[s].z;\n}\n\nstatic int thin(int rows, int cols, XYZ* verts, double tolerance) {\n int index = 0;\n int nonEmpty = 0;\n\n for (int i = 1; i < cols - 1; i++) {\n for (int j = 1; j < rows - 1; j++) {\n if (!std::isnan(verts[index].z)) {\n double e1 = verts[index].z;\n double e2 = 
findNearHeight(rows, cols, index, 1, -1, verts);\n double e3 = findNearHeight(rows, cols, index, 1, 0, verts);\n double e4 = findNearHeight(rows, cols, index, 1, 1, verts);\n double d2 = abs(e1 - e2);\n double d3 = abs(e1 - e3);\n double d4 = abs(e1 - e4);\n\n if (d2 <= tolerance &&\n d3 <= tolerance &&\n d4 <= tolerance) {\n verts[index].z = NAN;\n } else {\n nonEmpty++;\n }\n }\n\n index++;\n }\n }\n\n return nonEmpty;\n}\n\nvoid terrain_to_obj(const std::string& elevationPath, const std::string& projDef, double x1, double y1, double x2, double y2) {\n Elevation elevation(floor(y1), floor(x1), ceil(y2), ceil(x2), elevationPath);\n ObjWriter writer(cout);\n projPJ proj = pj_init_plus(projDef.c_str());\n double step = 1.0 / 3600;\n int rows = (int)floor((y2 - y1) / step + 1);\n int cols = (int)floor((x2 - x1) / step + 1);\n double bounds[] = {x1*DEG_TO_RAD, y1*DEG_TO_RAD, x2*DEG_TO_RAD, y2*DEG_TO_RAD};\n\n pj_transform(wgs84, proj, 2, 2, (double*)&bounds, (double*)&bounds + 1, nullptr);\n\n cerr << \"rows: \" << rows << \", cols: \" << cols << endl;\n cerr << \"bounds: \" << bounds[0] << \", \" << bounds[1] << \" - \" << bounds[2] << \", \" << bounds[3] << endl;\n\n cerr << \"Calculating vertices...\" << endl;\n XYZ* coords = new XYZ[rows * cols + 3];\n XYZ* normals = new XYZ[rows * cols];\n\n int i = 0;\n // Having columns as outer loop ensures x will be growing,\n // which is a requirement for the triangulation algorithm,\n // as long as projection is west to east.\n for (int c = 0; c < cols; c++) {\n double x = bounds[0] + (bounds[2] - bounds[0]) * c / cols;\n for (int r = 0; r < rows; r++) {\n double y = bounds[1] + (bounds[3] - bounds[1]) * r / rows;\n double ll[2] = {x, y};\n pj_transform(proj, wgs84, 1, 2, (double*)&ll, (double*)&ll + 1, nullptr);\n\n XYZ& coord = coords[i++];\n coord.x = x;\n coord.y = y;\n coord.z = elevation.elevation(ll[1]*RAD_TO_DEG, ll[0]*RAD_TO_DEG);\n\n //cerr << (ll[0] * RAD_TO_DEG) << \", \" << (ll[1] * RAD_TO_DEG) << \" (\" << 
coord.x << \", \" << coord.y << \"): \" << coord.z << endl;\n }\n }\n\n int startCount = rows * cols,\n lastCount = 0,\n count = -1;\n cerr << \"Thinning \" << startCount << \" vertices...\" << endl;\n while (lastCount != count) {\n lastCount = count;\n count = thin(rows, cols, coords, 2);\n }\n\n int j = 0;\n for (int i = 0; i < rows * cols; i++) {\n if (!std::isnan(coords[i].z)) {\n coords[j++] = coords[i];\n }\n }\n cerr << \"Thinned to \" << j << \" vertices\" << endl;\n\n cerr << \"Triangulating...\" << endl;\n ITRIANGLE *tris = new ITRIANGLE[3 * rows * cols];\n int numTriangles;\n Triangulate(j, coords, tris, numTriangles);\n cerr << numTriangles << \" triangles\" << endl;\n\n memset((void*)normals, 0, sizeof(XYZ) * j);\n for (int i = 0; i < numTriangles; i++) {\n XYZ normal;\n triangleNormal(coords, tris[i], normal);\n vecAdd(normals[tris[i].p1], normal);\n vecAdd(normals[tris[i].p2], normal);\n vecAdd(normals[tris[i].p3], normal);\n }\n\n writer.checkpoint();\n for (int i = 0; i < j; i++) {\n writer.vertex(coords[i].y, coords[i].z, coords[i].x, normals[i].y, normals[i].z, normals[i].x);\n }\n\n for (int i = 0; i < numTriangles; i++) {\n writer.beginFace();\n writer << tris[i].p1 << tris[i].p2 << tris[i].p3;\n writer.endFace();\n }\n\n delete coords;\n delete normals;\n delete tris;\n}\n\nint main(int argc, char* argv[]) {\n namespace po = boost::program_options;\n po::options_description desc(\"Options\");\n desc.add_options()\n (\"elevation_dir,e\", po::value()->required(), \"Set directory containing elevation data\")\n (\"proj,p\", po::value(), \"Projection definition\")\n (\"x1\", po::value()->required(), \"X1\")\n (\"y1\", po::value()->required(), \"Y1\")\n (\"x2\", po::value()->required(), \"X2\")\n (\"y2\", po::value()->required(), \"Y2\");\n po::positional_options_description positionOptions;\n positionOptions.add(\"x1\", 1);\n positionOptions.add(\"y1\", 1);\n positionOptions.add(\"x2\", 1);\n positionOptions.add(\"y2\", 1);\n\n po::variables_map 
vm;\n try {\n po::store(po::command_line_parser(argc, argv)\n .options(desc)\n .positional(positionOptions)\n .run(), vm);\n\n po::notify(vm);\n } catch (po::error& e) {\n cerr << \"Error \" << e.what() << endl << endl;\n cerr << desc << endl;\n return 1;\n }\n\n const string& elevPath(vm[\"elevation_dir\"].as());\n const string* projDef = nullptr;\n const double x1 = vm[\"x1\"].as();\n const double y1 = vm[\"y1\"].as();\n const double x2 = vm[\"x2\"].as();\n const double y2 = vm[\"y2\"].as();\n\n if (vm.count(\"proj\")) {\n projDef = &vm[\"proj\"].as();\n } else {\n ostringstream stream;\n stream << \"+proj=tmerc +lat_0=\" << ((y1 + y2) / 2) << \" +lon_0=\" << ((x1 + x2) / 2) << \" +k=1.000000 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs\";\n projDef = new string(stream.str());\n }\n\n terrain_to_obj(elevPath, *projDef, x1, y1, x2, y2);\n\n return 0;\n}\n\n", "meta": {"hexsha": "eca116be3640c1de8b4a9b9592eda5333a4d8231", "size": 7081, "ext": "cxx", "lang": "C++", "max_stars_repo_path": "src/terrain.cxx", "max_stars_repo_name": "perliedman/osmwave", "max_stars_repo_head_hexsha": "a0ffa931844702cdc31b83f27d2632651973026a", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2015-12-05T17:27:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T04:45:41.000Z", "max_issues_repo_path": "src/terrain.cxx", "max_issues_repo_name": "perliedman/osmwave", "max_issues_repo_head_hexsha": "a0ffa931844702cdc31b83f27d2632651973026a", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-08-14T10:26:21.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-17T10:33:22.000Z", "max_forks_repo_path": "src/terrain.cxx", "max_forks_repo_name": "perliedman/osmwave", "max_forks_repo_head_hexsha": "a0ffa931844702cdc31b83f27d2632651973026a", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": 
"2019-11-08T10:15:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-23T12:20:49.000Z", "avg_line_length": 32.0407239819, "max_line_length": 168, "alphanum_fraction": 0.5333992374, "num_tokens": 2137, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8652240964782011, "lm_q2_score": 0.5774953651858118, "lm_q1q2_score": 0.4996629055632428}} {"text": "#ifndef MLT_MODELS_TRANSFORMERS_SPARSE_TIED_AUTOENCODER_HPP\n#define MLT_MODELS_TRANSFORMERS_SPARSE_TIED_AUTOENCODER_HPP\n\n#include \n#include \n#include \n\n#include \n\n#include \"transformer.hpp\"\n#include \"../implementations/autoencoder.hpp\"\n#include \"../../utils/eigen.hpp\"\n\nnamespace mlt {\nnamespace models {\nnamespace transformers {\n\tusing namespace utils::eigen;\n\n\ttemplate \n\tclass SparseTiedAutoencoder : public Transformer> {\n\tpublic:\n\t\ttemplate , HiddenActivation>::value\n\t\t\t&& is_convertible, ReconstructionActivation>::value\n\t\t\t&& is_convertible, Optimizer>::value>>\n\t\texplicit SparseTiedAutoencoder(int hidden_units, H&& hidden_activation, R&& reconstruction_activation, O&& optimizer, double regularization,\n\t\tdouble sparsity, double sparsity_weight) : _hidden_units(hidden_units), _hidden_activation(forward(hidden_activation)),\n\t\t\t_reconstruction_activation(forward(reconstruction_activation)), _optimizer(forward(optimizer)), _regularization(regularization),\n\t\t\t_sparsity(sparsity), _sparsity_weight(sparsity_weight) {}\n\n\t\tResult transform(Features input) const {\n\t\t\tassert(_fitted);\n\t\t\treturn _hidden_activation.compute((_weights * input).colwise() + _hidden_intercepts);\n\t\t}\n\n\t\tSelf& fit(Features input, bool cold_start = true) {\n\t\t\tVectorXd init(_hidden_units * input.rows() + _hidden_units + input.rows());\n\n\t\t\tif (_fitted && !cold_start) {\n\t\t\t\tinit.block(0, 0, _weights.size(), 1) = ravel(_weights);\n\t\t\t\tinit.block(_weights.size(), 0, _hidden_intercepts.size(), 1) = 
_hidden_intercepts;\n\n\t\t\t\tinit.block(_weights.size() + _hidden_intercepts.size(),\n\t\t\t\t\t0, _reconstruction_intercepts.size(), 1) = _reconstruction_intercepts;\n\t\t\t} else {\n\t\t\t\tinit = (init.setRandom() * 4 / sqrt(6.0 / (_hidden_units + input.rows())));\n\t\t\t}\n\n\t\t\tVectorXd coeffs = _optimizer(*this, input, input, init, cold_start);\n\n\t\t\t_weights = unravel(coeffs.block(0, 0, _hidden_units * input.rows(), 1), _hidden_units, input.rows());\n\t\t\t_hidden_intercepts = coeffs.block(_hidden_units * input.rows(), 0, _hidden_units, 1);\n\t\t\t_reconstruction_intercepts = coeffs.block(_hidden_units * input.rows() + _hidden_units, 0, input.rows(), 1);\n\n\t\t\t_fitted = true;\n\n\t\t\treturn _self();\n\t\t}\n\n\t\tusing Transformer::fit;\n\n\t\tauto loss(VectorXdRef coeffs, Features input, Features target) const {\n\t\t\tauto weights = unravel(coeffs.block(0, 0, _hidden_units * input.rows(), 1), _hidden_units, input.rows());\n\t\t\tauto hidden_intercepts = coeffs.block(_hidden_units * input.rows(), 0, _hidden_units, 1);\n\t\t\tauto reconstruction_intercepts = coeffs.block(_hidden_units * input.rows() + _hidden_units, 0, input.rows(), 1);\n\n\t\t\treturn implementations::autoencoder::sparse_loss(_hidden_activation, _reconstruction_activation, weights, hidden_intercepts,\n\t\t\t\tweights.transpose(), reconstruction_intercepts, _regularization, _sparsity, _sparsity_weight, input, target);\n\t\t}\n\n\t\tauto gradient(VectorXdRef coeffs, Features input, Features target) const {\n\t\t\tauto weights = unravel(coeffs.block(0, 0, _hidden_units * input.rows(), 1), _hidden_units, input.rows());\n\t\t\tauto hidden_intercepts = coeffs.block(_hidden_units * input.rows(), 0, _hidden_units, 1);\n\t\t\tauto reconstruction_intercepts = coeffs.block(_hidden_units * input.rows() + _hidden_units, 0, input.rows(), 1);\n\n\t\t\tMatrixXd weights_grad, weights_transp_grad;\n\t\t\tVectorXd hid_inter_grad, rec_inter_grad;\n\t\t\ttie(weights_grad, hid_inter_grad, 
weights_transp_grad, rec_inter_grad) = implementations::autoencoder::sparse_gradient(_hidden_activation,\n\t\t\t\t_reconstruction_activation, weights, hidden_intercepts, weights.transpose(), reconstruction_intercepts, _regularization, \n\t\t\t\t_sparsity, _sparsity_weight, input, target);\n\n\t\t\tVectorXd gradient(coeffs.rows());\n\n\t\t\tgradient.block(0, 0, weights_grad.size(), 1) = ravel(weights_grad + weights_transp_grad.transpose());\n\t\t\tgradient.block(weights_grad.size(), 0, hid_inter_grad.size(), 1) = hid_inter_grad;\n\n\t\t\tgradient.block(weights_grad.size() + hid_inter_grad.size(),\n\t\t\t\t0, rec_inter_grad.size(), 1) = rec_inter_grad;\n\n\t\t\treturn gradient;\n\t\t}\n\n\t\tauto loss_and_gradient(VectorXdRef coeffs, Features input, Features target) const {\n\t\t\tauto weights = unravel(coeffs.block(0, 0, _hidden_units * input.rows(), 1), _hidden_units, input.rows());\n\t\t\tauto hidden_intercepts = coeffs.block(_hidden_units * input.rows(), 0, _hidden_units, 1);\n\t\t\tauto reconstruction_intercepts = coeffs.block(_hidden_units * input.rows() + _hidden_units, 0, input.rows(), 1);\n\n\t\t\tdouble loss;\n\t\t\tMatrixXd weights_grad, weights_transp_grad;\n\t\t\tVectorXd hid_inter_grad, rec_inter_grad;\n\t\t\ttie(loss, weights_grad, hid_inter_grad, weights_transp_grad, rec_inter_grad) = implementations::autoencoder::sparse_loss_and_gradient(\n\t\t\t\t_hidden_activation, _reconstruction_activation, weights, hidden_intercepts, weights.transpose(), reconstruction_intercepts,\n\t\t\t\t_regularization, _sparsity, _sparsity_weight, input, target);\n\n\t\t\tVectorXd gradient(coeffs.rows());\n\n\t\t\tgradient.block(0, 0, weights_grad.size(), 1) = ravel(weights_grad + weights_transp_grad.transpose());\n\t\t\tgradient.block(weights_grad.size(), 0, hid_inter_grad.size(), 1) = hid_inter_grad;\n\n\t\t\tgradient.block(weights_grad.size() + hid_inter_grad.size(),\n\t\t\t\t0, rec_inter_grad.size(), 1) = rec_inter_grad;\n\n\t\t\treturn make_tuple(loss, 
gradient);\n\t\t}\n\n\tprotected:\n\t\tint _hidden_units;\n\t\tHiddenActivation _hidden_activation;\n\t\tReconstructionActivation _reconstruction_activation;\n\t\tOptimizer _optimizer;\n\t\tdouble _regularization;\n\t\tdouble _sparsity;\n\t\tdouble _sparsity_weight;\n\n\t\tMatrixXd _weights;\n\t\tVectorXd _hidden_intercepts;\n\t\tVectorXd _reconstruction_intercepts;\n\t};\n\n\ttemplate \n\tauto create_sparse_tied_autoencoder(int hidden_units, HiddenActivation&& hidden_activation,\n\tReconstructionActivation&& reconstruction_activation, Optimizer&& optimizer,\n\tdouble regularization, double sparsity, double sparsity_weight) {\n\t\treturn SparseTiedAutoencoder(\n\t\t\thidden_units,\n\t\t\tforward(hidden_activation), \n\t\t\tforward(reconstruction_activation),\n\t\t\tforward(optimizer),\n\t\t\tregularization, sparsity, sparsity_weight);\n\t}\n}\n}\n}\n#endif", "meta": {"hexsha": "626ead3dc3c567dc548f82ab4764fcac84bf45b9", "size": 6500, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/mlt/models/transformers/sparse_tied_autoencoder.hpp", "max_stars_repo_name": "fedeallocati/MachineLearningToolkit", "max_stars_repo_head_hexsha": "8614ee2c8c5211a3eefceb10a50576e0485cefd9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2015-08-31T11:43:19.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-22T11:03:47.000Z", "max_issues_repo_path": "src/mlt/models/transformers/sparse_tied_autoencoder.hpp", "max_issues_repo_name": "fedeallocati/MachineLearningToolkit", "max_issues_repo_head_hexsha": "8614ee2c8c5211a3eefceb10a50576e0485cefd9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mlt/models/transformers/sparse_tied_autoencoder.hpp", "max_forks_repo_name": "fedeallocati/MachineLearningToolkit", "max_forks_repo_head_hexsha": 
"8614ee2c8c5211a3eefceb10a50576e0485cefd9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.1388888889, "max_line_length": 142, "alphanum_fraction": 0.7663076923, "num_tokens": 1568, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8652240825770432, "lm_q2_score": 0.5774953651858118, "lm_q1q2_score": 0.49966289753538856}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nusing namespace std;\nusing namespace boost::gil;\n\n// value of strong edge pixel\nstatic const int TOP_VALUE = 255;\n// value of weak (maybe edge) pixel\nstatic const int MIDDLE_VALUE = 150;\n// value of definitely not-edge pixel\nstatic const int BOTTOM_VALUE = 0;\n\n// Example how to detect edges via Canny algorithm\n// Read more about Canny edge detection algorithm on\n// https://docs.opencv.org/3.1.0/da/d22/tutorial_py_canny.html\n\ntemplate \nvoid gaussian_blur(const SrcView &src)\n{\n //1-D Gaussian blur kernel with kernel size 5, Sigma 1.0\n float gaussian[] = {0.06136f, 0.24477f, 0.38774f, 0.24477f, 0.06136f};\n kernel_1d kernel(gaussian, 5, 2);\n\n convolve_rows(src, kernel, src, convolve_option_output_ignore);\n convolve_cols(src, kernel, src, convolve_option_output_ignore);\n}\n\n// Function calculates image gradient (intensity and direction)\n// Smoothened image is filtered with a Sobel kernels in both horizontal and vertical\n// direction to get first derivative in horizontal direction (Gx) and vertical direction (Gy).\n// Gradient manitude is found as G = sqrt(Gx^2 + Gy^2)\n// Direction is found as angle = arctan(Gy / Gx)\n// More information about sobel filtering on https://en.wikipedia.org/wiki/Sobel_operator\nvoid sobel_filtering(const gray8c_view_t &src,\n const gray8_view_t &magnitude, const gray32f_view_t &slope)\n{\n // This we used 1-D convolution\n // Gx = second * (first * 
img)\n // Gy = first * (second * img)\n // where * is convolution\n float first_sobel[] = {1.f, 0.f, -1.f};\n float second_sobel[] = {1.f, 2.f, 1.f};\n\n kernel_1d first_sobel_kernel(first_sobel, 3, 1);\n kernel_1d second_sobel_kernel(second_sobel, 3, 1);\n\n //16 bits signed matrix is used in order to avoid char overflow\n gray16s_image_t vertical(src.dimensions());\n gray16s_image_t horizontal(src.dimensions());\n\n convolve_rows(src, first_sobel_kernel,\n view(vertical), convolve_option_output_zero);\n convolve_cols(const_view(vertical), second_sobel_kernel,\n view(vertical), convolve_option_output_zero);\n\n convolve_rows(src, second_sobel_kernel, view(horizontal),\n convolve_option_output_zero);\n convolve_cols(const_view(horizontal), first_sobel_kernel,\n view(horizontal), convolve_option_output_zero);\n\n // Magnitude and angle calculation\n auto ver_it = view(vertical).begin();\n auto hor_it = view(horizontal).begin();\n auto slope_it = slope.begin();\n\n for (auto mag_it = magnitude.begin(); mag_it != magnitude.end();\n ++mag_it, ++ver_it, ++hor_it, ++slope_it) {\n\n // std::min was used to be sure, that pixel max value less than 256\n *mag_it = std::min(UINT8_MAX,\n (int)std::sqrt(\n std::pow((int)(*ver_it), 2) + std::pow((int)(*hor_it), 2)));\n\n *slope_it = (float)std::atan2((int)(*hor_it), (int)(*ver_it));\n }\n}\n\n// Non-maximum suppression method is perfomed to thin out edges of the image\n// Method goes throug all the points on the gradient intensity matrix and\n// finds the pixels with maximum value in the edge direction\nvoid non_maximal_suppression(const gray8c_view_t &magnitude, const gray32fc_view_t &angle,\n const gray8_view_t &dst)\n{\n // Dst matrix fills with 0\n fill_pixels(dst, int8_t(0));\n\n auto mag_loc = magnitude.xy_at(1, 1);\n int q, r;\n int index;\n int curVal;\n for (int y = 1; y < magnitude.height() - 1; ++y) {\n auto dst_it = dst.row_begin(y);\n auto angle_it = angle.row_begin(y);\n\n for (int x = 1; x < dst.width() - 1; ++x, 
++angle_it, ++dst_it, ++mag_loc.x()) {\n // Index helps to find direction of the edge\n // Pixel has 8 neighbors and 4 possible directions\n // (-1,-1) (0,-1) (1,-1)\n // (-1, 0) (0, 0) (1, 0)\n // (-1, 1) (0, 1) (1, 1)\n // On each direction 2 possible neighbors\n // If both of them is smaller than current value (0, 0),\n // then current value is saved in output image\n index = (int)((*angle_it)[0] * 8 / M_PI);\n index = (index < 0) ? index + 8 : index;\n\n curVal = mag_loc(0, 0);\n\n switch (index) {\n // Horizontal direction\n case 0:\n case 7:\n case 8:\n q = mag_loc(1, 0);\n r = mag_loc(-1, 0);\n break;\n // 45 degree\n case 1:\n case 2:\n q = mag_loc(1, 1);\n r = mag_loc(-1, -1);\n break;\n // Vertical direction\n case 3:\n case 4:\n q = mag_loc(0, 1);\n r = mag_loc(0, -1);\n break;\n // 135 degree\n case 5:\n case 6:\n q = mag_loc(1, -1);\n r = mag_loc(-1, 1);\n break;\n }\n\n if ((curVal >= q) && (curVal >= r)) {\n *dst_it = curVal;\n }\n }\n mag_loc += point2(-dst.width() + 2, 1);\n }\n}\n\nvoid hysteresis_threshold(const gray8_view_t &dst, int minVal, int maxVal)\n{\n // Histogram calculation of 3 groups: strong pixel, weak pixel, definitely not-edge pixel\n for_each_pixel(dst, [minVal, maxVal](gray8_pixel_t &pixel) {\n pixel = (pixel < minVal) ? BOTTOM_VALUE :\n ((pixel > maxVal) ? 
TOP_VALUE : MIDDLE_VALUE);\n });\n\n // Weak pixels check\n // If weak pixel has a strong neighbor, then it is strong one\n // otherwise it is definitely not-edge pixel\n auto dst_loc = dst.xy_at(1, 1);\n for (int i = 1; i < dst.height() - 1; ++i) {\n for (int j = 1; j < dst.width() - 1; ++j, ++dst_loc.x()) {\n if (dst_loc(0, 0) == MIDDLE_VALUE) {\n if (dst_loc(-1, -1) == TOP_VALUE || dst_loc(-1, 0) == TOP_VALUE ||\n dst_loc(-1, 1) == TOP_VALUE || dst_loc(1, -1) == TOP_VALUE ||\n dst_loc(1, 0) == TOP_VALUE || dst_loc(1, 1) == TOP_VALUE ||\n dst_loc(0, -1) == TOP_VALUE || dst_loc(0, 1) == TOP_VALUE)\n dst_loc(0, 0) = TOP_VALUE;\n else\n dst_loc(0, 0) = BOTTOM_VALUE;\n }\n }\n dst_loc += point2(-dst.width() + 2, 1);\n }\n}\n\nvoid canny_edge_detection(const rgb8c_view_t &src,\n const gray8_view_t &dst, int minVal, int maxVal)\n{\n // Canny edge detection algorithm works on grayscale images only\n gray8_image_t gray_img(src.dimensions());\n copy_pixels(color_converted_view(src), view(gray_img));\n\n gaussian_blur(view(gray_img));\n\n gray32f_image_t angle(src.dimensions());\n gray8_image_t magnitude(src.dimensions());\n sobel_filtering(view(gray_img), view(magnitude), view(angle));\n\n non_maximal_suppression(view(magnitude), view(angle), dst);\n\n hysteresis_threshold(dst, minVal, maxVal);\n}\n\nint main(int argc, char *argv[])\n{\n char *input = \"test.jpg\";\n char *output = \"canny.jpg\";\n int min_threshold_value = 30;\n int max_threshold_value = 70;\n\n if (argc >= 2 && (argv[1][0] == '-') && (argv[1][1] == 'H' || argv[1][1] == 'h')) {\n printf(\"canny [path_to_input [path_to_output \"\n \"[min_threshold_value [max_threshold_value]]]]\\n\");\n return 0;\n }\n if (argc >= 2) input = argv[1];\n if (argc >= 3) output = argv[2];\n if (argc >= 4) min_threshold_value = atoi(argv[3]);\n if (argc >= 5) max_threshold_value = atoi(argv[4]);\n\n if (min_threshold_value > max_threshold_value)\n std::swap(min_threshold_value, max_threshold_value);\n\n rgb8_image_t img;\n 
read_image(input, img, jpeg_tag{});\n\n gray8_image_t res_img(img.dimensions());\n canny_edge_detection(const_view(img), view(res_img),\n min_threshold_value, max_threshold_value);\n write_view(output, view(res_img), jpeg_tag{});\n return 0;\n}\n", "meta": {"hexsha": "1f8af033ffa006bc4536d6563b6978a70440f445", "size": 8534, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "example/canny.cpp", "max_stars_repo_name": "Antropovi/gil", "max_stars_repo_head_hexsha": "1f67b483956e655a391906a1c84f40572d2d8403", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example/canny.cpp", "max_issues_repo_name": "Antropovi/gil", "max_issues_repo_head_hexsha": "1f67b483956e655a391906a1c84f40572d2d8403", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example/canny.cpp", "max_forks_repo_name": "Antropovi/gil", "max_forks_repo_head_hexsha": "1f67b483956e655a391906a1c84f40572d2d8403", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5947136564, "max_line_length": 94, "alphanum_fraction": 0.5925708929, "num_tokens": 2283, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7248702880639791, "lm_q2_score": 0.6893056104028799, "lm_q1q2_score": 0.4996571563768525}} {"text": "/**\n * @file\n * @brief Solution of source-free heat equation and computation of H1\n * \t seminorms on different triangular meshes and refinement levels\n * @author Julien Gacon, Am\u00e9lie Loher\n * @date March 2019\n */\n\n#include \"unstablebvp.h\"\n// General includes\n#include \n#include \n#include \n#include \n// Eigen\n#include \n#include \n// Lehrfempp\n#include \n#include \n#include \n#include \n#include \n\nnamespace UnstableBVP {\n\nstd::shared_ptr createMeshHierarchy(\n const int reflevels, const std::string &mesh_type) {\n // Helper object: mesh factory\n std::shared_ptr mesh_factory_ptr =\n std::make_shared(2);\n\n // Decide where the triangular domain should be located in x_2 direction\n // by adding an offset to the x_2 coordinate of the nodes\n double offset = 0;\n if (mesh_type == \"top\") {\n offset = 1.5;\n } else if (mesh_type == \"bottom\") {\n offset = -1.5;\n } else {\n // already at 0\n }\n\n // Define the nodes\n std::array, 3> node_coord{\n std::array({0.5, -0.5 + offset}),\n std::array({0, 0.5 + offset}),\n std::array({1, 0.5 + offset})};\n\n for (const auto &node : node_coord) {\n mesh_factory_ptr->AddPoint(Eigen::Vector2d({node[0], node[1]}));\n }\n\n // Initialize triangle\n mesh_factory_ptr->AddEntity(lf::base::RefEl::kTria(),\n std::vector({0, 1, 2}),\n std::unique_ptr(nullptr));\n\n // Get a pointer to the mesh\n std::shared_ptr mesh_p = mesh_factory_ptr->Build();\n\n // (optional) Print information about the mesh\n // std::cout << \" Mesh info\\n\" << *mesh_p;\n\n // Ask LehrFEM++ to create a hierarchy of nested meshes\n std::shared_ptr multi_mesh_p =\n lf::refinement::GenerateMeshHierarchyByUniformRefinemnt(mesh_p,\n reflevels);\n\n return multi_mesh_p;\n}\n\ndouble solveTemperatureDistribution(\n std::shared_ptr mesh_p) {\n // **********************************************************************\n // 
Stage 0: provide all coefficient functions mainly through lambda\n // functions and derived MeshFunctions\n // **********************************************************************\n\n // The boundary condition\n auto bc = [](Eigen::Vector2d x) -> double {\n return x[1] <= 0 ? 1 - x[1] : 0;\n };\n // Wrap into a MeshFunction\n lf::mesh::utils::MeshFunctionGlobal mf_bc{bc};\n\n // We use lowest-order (p.w. linear Lagrangian finite elements), for which\n // LehrFEM++ provides a built-in description according to the paradigm of\n // parametric finite elements.\n auto fe_space =\n std::make_shared>(mesh_p);\n // Reference to current mesh\n const lf::mesh::Mesh &mesh{*(fe_space->Mesh())};\n // Obtain local->global index mapping for current finite element space\n const lf::assemble::DofHandler &dofh{fe_space->LocGlobMap()};\n\n // **********************************************************************\n // Stage 1: Assemble finite element Galerkin matrix\n // **********************************************************************\n\n // Dimension of finite element space`\n const lf::base::size_type N_dofs(dofh.NumDofs());\n // Matrix in triplet format holding Galerkin matrix, zero initially.\n lf::assemble::COOMatrix A(N_dofs, N_dofs);\n\n // Element matrix builder for the negative Laplacian\n lf::uscalfe::LinearFELaplaceElementMatrix elmat_builder{};\n\n // Invoke assembly on cells (co-dimension = 0 as first argument)\n // Information about the mesh and the local-to-global map is passed through\n // a Dofhandler object, argument 'dofh'. 
This function call adds triplets to\n // the internal COO-format representation of the sparse matrix A.\n lf::assemble::AssembleMatrixLocally(0, dofh, dofh, elmat_builder, A);\n\n // **********************************************************************\n // Stage 2: Right-hand side vector\n // **********************************************************************\n\n // Define RHS vector\n // No source, hence it is simply zero\n Eigen::Matrix phi(N_dofs);\n phi.setZero();\n\n // **********************************************************************\n // Stage 3: Fixing solution components according to essential (Dirichlet)\n // boundary conditions\n // **********************************************************************\n\n // Obtain specification for shape functions on edges\n std::shared_ptr>\n rsf_edge_p = fe_space->ShapeFunctionLayout(lf::base::RefEl::kSegment());\n LF_ASSERT_MSG(rsf_edge_p != nullptr, \"FE specification for edges missing\");\n\n // Obtain an array of boolean flags for the edges (codim 1) of the mesh,\n // `true` indicates that the edge lies on the boundary\n auto bd_flags{lf::mesh::utils::flagEntitiesOnBoundary(fe_space->Mesh(), 1)};\n\n // Fetch flags and values for degrees of freedom located on Dirichlet\n // edges.\n auto ess_bdc_flags_values{lf::uscalfe::InitEssentialConditionFromFunction(\n dofh, *rsf_edge_p,\n [&bd_flags](const lf::mesh::Entity &edge) -> bool {\n return (bd_flags(edge));\n },\n mf_bc)};\n\n // Eliminate Dirichlet dofs from linear system\n lf::assemble::FixFlaggedSolutionComponents(\n [&ess_bdc_flags_values](lf::assemble::glb_idx_t gdof_idx) {\n return ess_bdc_flags_values[gdof_idx];\n },\n A, phi);\n\n // **********************************************************************\n // Stage 4: Solve LSE\n // **********************************************************************\n\n // Assembly completed: Convert COO matrix A into CRS format using Eigen's\n // internal conversion routines.\n Eigen::SparseMatrix A_crs = 
A.makeSparse();\n\n // Solve linear system using Eigen's sparse direct elimination\n Eigen::SparseLU> solver;\n solver.compute(A_crs);\n LF_VERIFY_MSG(solver.info() == Eigen::Success, \"LU decomposition failed\");\n Eigen::VectorXd sol_vec = solver.solve(phi);\n LF_VERIFY_MSG(solver.info() == Eigen::Success, \"Solving LSE failed\");\n\n // **********************************************************************\n // Stage 5: Compute H1 seminorm\n // **********************************************************************\n\n // Compute the difference to a function that's zero everywhere, hence\n // just the gradient of the solution (which is encapsulated in the fe_space).\n // We use this trick to avoid the manual computation and make use of the\n // LehrFEM facilities :)\n lf::uscalfe::MeshFunctionL2GradientDifference loc_comp(\n fe_space,\n lf::mesh::utils::MeshFunctionConstant(Eigen::Vector2d(0.0, 0.0)), 2);\n\n // Compute the norm of the ``difference'' (i.e. the norm of the gradient of\n // the solution)\n const double norm = lf::uscalfe::NormOfDifference(dofh, loc_comp, sol_vec);\n\n return norm;\n}\n\n} // namespace UnstableBVP\n", "meta": {"hexsha": "c93ac3118d2fed153b4301f0e1b34b6ba9e7ee89", "size": 7338, "ext": "cc", "lang": "C++", "max_stars_repo_path": "homeworks/UnstableBVP/templates/unstablebvp.cc", "max_stars_repo_name": "padomu/NPDECODES", "max_stars_repo_head_hexsha": "d2bc5b0d2d5e76e4d5b8ab6948c82f902211182e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "homeworks/UnstableBVP/templates/unstablebvp.cc", "max_issues_repo_name": "padomu/NPDECODES", "max_issues_repo_head_hexsha": "d2bc5b0d2d5e76e4d5b8ab6948c82f902211182e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": 
"homeworks/UnstableBVP/templates/unstablebvp.cc", "max_forks_repo_name": "padomu/NPDECODES", "max_forks_repo_head_hexsha": "d2bc5b0d2d5e76e4d5b8ab6948c82f902211182e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8253968254, "max_line_length": 80, "alphanum_fraction": 0.6046606705, "num_tokens": 1786, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.689305616785446, "lm_q2_score": 0.7248702761768248, "lm_q1q2_score": 0.4996571528095028}} {"text": "#include \"sv/util/eigen.h\"\n\n#include \n\n#include \"sv/util/logging.h\"\n\nnamespace sv {\n\nvoid StableRotateBlockTopLeft(MatrixXdRef H,\n VectorXdRef b,\n int block_ind,\n int block_size) {\n CHECK_EQ(H.rows(), b.size());\n CHECK_EQ(H.cols(), b.size());\n CHECK_GE(block_ind, 0);\n CHECK_GT(block_size, 0);\n CHECK_LT(block_ind * block_size, H.rows());\n\n if (block_ind == 0) return;\n const auto n = block_size;\n\n // Permute block of rows up gradually, also need to permute b\n for (int i = block_ind; i > 0; --i) {\n const auto r = i * block_size;\n // swap current rows with above rows\n H.middleRows(r, n).swap(H.middleRows(r - n, n));\n b.segment(r, n).swap(b.segment(r - n, n));\n }\n\n // Permute block of cols\n for (int j = block_ind; j > 0; --j) {\n const auto c = j * block_size;\n // swap current cols with left cols\n H.middleCols(c, n).swap(H.middleCols(c - n, n));\n }\n}\n\nvoid FillLowerTriangular(MatrixXdRef M) {\n CHECK_EQ(M.rows(), M.cols());\n M.triangularView() =\n M.triangularView().transpose();\n}\n\nvoid FillUpperTriangular(MatrixXdRef M) {\n CHECK_EQ(M.rows(), M.cols());\n M.triangularView() =\n M.triangularView().transpose();\n}\n\nvoid MakeSymmetric(MatrixXdRef M) {\n CHECK_EQ(M.rows(), M.cols());\n M += M.transpose().eval();\n M.array() /= 2.0;\n}\n\nvoid MargTopLeftBlock(const MatrixXdCRef& Hf,\n const VectorXdCRef& bf,\n MatrixXdRef 
Hm,\n VectorXdRef bm,\n int dim) {\n // Pre-condition\n // 1. Hf is square and match bsc and symmetric\n // 2. Hm is quare and match bpr\n const auto nf = bf.size();\n const auto nm = bm.size();\n CHECK_GT(dim, 0);\n CHECK_EQ(nm + dim, nf);\n CHECK_EQ(Hf.rows(), nf);\n CHECK_EQ(Hf.cols(), nf);\n CHECK_EQ(Hm.rows(), nm);\n CHECK_EQ(Hm.cols(), nm);\n CHECK_EQ(Hf, Hf.transpose()) << \"\\n\" << Hf;\n\n // Hf bf\n // [ H00 H01 ] [ x0 ] = [ b0 ]\n // [ H10 H11 ] [ x1 ] = [ b1 ]\n // Hm = H11 - H10 * H00^-1 * H01\n // bm = b1 - H10 * H00^-1 * b0\n const auto H01 = Hf.topRightCorner(dim, nm);\n const auto H10 = Hf.bottomLeftCorner(nm, dim);\n\n // Benchmark shows that simply inverse has similar speed as llt\n // However to account for rank-deficiency we use ldlt to inverse\n const auto H00_inv = Hf.topLeftCorner(dim, dim)\n .selfadjointView()\n .ldlt()\n .solve(Eigen::MatrixXd::Identity(dim, dim))\n .eval();\n\n Hm = Hf.bottomRightCorner(nm, nm);\n Hm.noalias() -= H10 * H00_inv * H01;\n\n const auto b0 = bf.head(dim);\n bm = bf.tail(nm); // b1\n bm.noalias() -= H10 * (H00_inv * b0);\n\n // Make sure Hpr is symmetric\n MakeSymmetric(Hm);\n\n // Post-condition\n // 1. Hpr shape doesn't change\n // 2. 
Hpr is symmetric\n CHECK_EQ(Hm.rows(), nm);\n CHECK_EQ(Hm.cols(), nm);\n CHECK_EQ(Hm, Hm.transpose()) << \"\\n\" << Hm;\n}\n\n} // namespace sv\n", "meta": {"hexsha": "1f8425fc30c595100b03e6d18dd4f8fe4e7608d0", "size": 3089, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "sv/util/eigen.cpp", "max_stars_repo_name": "versatran01/dsol", "max_stars_repo_head_hexsha": "1c390f10f55fed0d0ef62b0f18e9003bd82c3876", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 52.0, "max_stars_repo_stars_event_min_datetime": "2022-03-17T02:03:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T06:11:52.000Z", "max_issues_repo_path": "sv/util/eigen.cpp", "max_issues_repo_name": "versatran01/dsol", "max_issues_repo_head_hexsha": "1c390f10f55fed0d0ef62b0f18e9003bd82c3876", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sv/util/eigen.cpp", "max_forks_repo_name": "versatran01/dsol", "max_forks_repo_head_hexsha": "1c390f10f55fed0d0ef62b0f18e9003bd82c3876", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2022-03-17T06:13:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T09:15:58.000Z", "avg_line_length": 28.3394495413, "max_line_length": 70, "alphanum_fraction": 0.5681450308, "num_tokens": 918, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7248702761768248, "lm_q2_score": 0.6893056040203135, "lm_q1q2_score": 0.49965714355643764}} {"text": "//==================================================================================================\n/*!\n @file\n\n @copyright 2016 NumScale SAS\n\n Distributed under the Boost Software License, Version 1.0.\n (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)\n*/\n//==================================================================================================\n#ifndef BOOST_SIMD_CONSTANT_TWOTOM10_HPP_INCLUDED\n#define BOOST_SIMD_CONSTANT_TWOTOM10_HPP_INCLUDED\n\n#if defined(DOXYGEN_ONLY)\nnamespace boost { namespace simd\n{\n /*!\n @ingroup group-constant\n\n Generate 2 to the power -10 (\\f$2^{-10}\\f$)\n\n\n @par Header \n\n @par Semantic:\n\n @code\n T r = Twotom10();\n @endcode\n\n is similar to:\n\n @code\n T r = pow(2, -10);\n @endcode\n\n @return The Twotom10 constant for the proper type\n **/\n template T Twotom10();\n\n namespace functional\n {\n /*!\n @ingroup group-callable-constant\n Generate the constant twotom10.\n\n @return The Twotom10 constant for the proper type\n **/\n const boost::dispatch::functor twotom10 = {};\n }\n} }\n#endif\n\n#include \n#include \n\n#endif\n", "meta": {"hexsha": "2408557dd04d5bf47523a0b5adff3c37c406cbd0", "size": 1293, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/simd/constant/twotom10.hpp", "max_stars_repo_name": "TobiasLudwig/boost.simd", "max_stars_repo_head_hexsha": "c04d0cc56747188ddb9a128ccb5715dd3608dbc1", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/simd/constant/twotom10.hpp", "max_issues_repo_name": "TobiasLudwig/boost.simd", "max_issues_repo_head_hexsha": "c04d0cc56747188ddb9a128ccb5715dd3608dbc1", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, 
"max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/simd/constant/twotom10.hpp", "max_forks_repo_name": "TobiasLudwig/boost.simd", "max_forks_repo_head_hexsha": "c04d0cc56747188ddb9a128ccb5715dd3608dbc1", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2018-02-16T09:58:18.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-10T14:22:43.000Z", "avg_line_length": 22.2931034483, "max_line_length": 100, "alphanum_fraction": 0.5746326373, "num_tokens": 319, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7248702642896702, "lm_q2_score": 0.6893056104028797, "lm_q1q2_score": 0.49965713998908784}} {"text": "/*\n * Copyright (c) 2013-2015 Masahide Kashiwagi (kashi@waseda.jp)\n */\n\n#ifndef NEWTON_HPP\n#define NEWTON_HPP\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n\nnamespace kv {\n\nnamespace ub = boost::numeric::ublas;\n\n\ntemplate \nbool\nnewton(F f, ub::vector& x, T epsilon = std::numeric_limits::epsilon(), int maxloop = 100)\n{\n\tint s = x.size();\n\tint i, j, r;\n\tub::vector fx;\n\tub::matrix fdx;\n\tT norm1, norm2;\n\n\tfor (i=0; i::split(f(autodif::init(x)), fx, fdx);\n\t\t\tub::permutation_matrix<> pm(s);\n\t\t\tr = ub::lu_factorize(fdx, pm);\n\t\t\tif (r != 0) return false;\n\t\t\tub::lu_substitute(fdx, pm, fx);\n\t\t}\n\t\tcatch (...) 
{\n\t\t\treturn false;\n\t\t}\n\n\t\tnorm1 = 1.;\n\t\tnorm2 = 0.;\n\t\tfor (j=0; j\nbool\nnewton_random(F f, ub::vector& x, T epsilon = std::numeric_limits::epsilon(), int maxloop = 100)\n{\n\tint s = x.size();\n\tint i;\n\n\tusing namespace boost;\n\t// use \"static\" to be \"randomized\" only once\n\tstatic variate_generator< mt19937, normal_distribution<> > rand (mt19937(time(0)), normal_distribution<>(0., 10.));\n\n\tfor (i=0; i\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace nt2 { namespace ext\n{\n //============================================================================\n // This version of freqspace is called whenever a tie(...) = freqspace(...) is\n // captured before assign is resolved. As a tieable function, freqspace\n // retrieves rhs/lhs pair as inputs\n //============================================================================\n NT2_FUNCTOR_IMPLEMENTATION( nt2::tag::freqspace_, tag::cpu_\n , (A0)(N0)(A1)(N1)\n , ((node_ < A0, nt2::tag::freqspace_\n , N0, nt2::container::domain\n >\n ))\n ((node_ < A1, nt2::tag::tie_\n , N1, nt2::container::domain\n >\n ))\n )\n {\n typedef void result_type;\n typedef typename boost::proto::result_of::child_c::type child0;\n typedef typename boost::proto::result_of::child_c::type child1;\n typedef typename boost::dispatch::meta::\n terminal_of< typename boost::dispatch::meta::\n semantic_of::type\n >::type in0_t;\n typedef typename boost::dispatch::meta::\n terminal_of< typename boost::dispatch::meta::\n semantic_of::type\n >::type out_t;\n\n typedef typename out_t::value_type value_t;\n\n BOOST_FORCEINLINE result_type operator()( A0& a0, A1& a1 ) const\n {\n int n = 0, m = 0;\n bool whole = false;\n bool meshgrid = false;\n getmn(a0, m, n, whole, meshgrid, N0(), N1() );\n compute(a1, m, n, whole, meshgrid, N1());\n }\n\n private:\n BOOST_FORCEINLINE void compute( A1 & a1, int m, int, bool whole\n , bool, boost::mpl::long_<1> const&\n ) const\n {\n if 
(whole)\n boost::proto\n ::child_c<0>(a1) = freqspace1(m,nt2::whole_,meta::as_());\n else\n boost::proto::child_c<0>(a1) = freqspace1(m, meta::as_());\n }\n\n void compute( A1 & a1, int m, int n, bool\n , bool /*meshgrid*/, boost::mpl::long_<2> const&\n ) const\n {\n value_t hvm = m*nt2::Half();\n value_t hvn = n*nt2::Half();\n value_t hm = nt2::rec(hvm);\n value_t hn = nt2::rec(hvn);\n value_t lm = -nt2::floor(hvm)*hm;\n value_t ln = -nt2::floor(hvn)*hn;\n\n // TODO: implement support for meshgrid option\n //if (meshgrid)\n // {\n boost::proto::child_c<0>(a1) = nt2::_(ln, hn, value_t(1)-value_t(2)/n);\n boost::proto::child_c<1>(a1) = nt2::_(lm, hm, value_t(1)-value_t(1)/m);\n // }\n // else\n // {\n // boost::proto::child_c<0>(a1) = ??;\n // boost::proto::child_c<1>(a1) = ??;\n // }\n }\n\n BOOST_FORCEINLINE //[f] = freqspace(n)\n void getmn(A0 const &a0, int &m, int& n, bool&, bool&,\n boost::mpl::long_<3> const &,//number of inputs\n boost::mpl::long_<1> const &//number of outputs\n ) const\n {\n m = int(boost::proto::value(boost::proto::child_c<1>(a0)));\n n = 0;\n }\n\n BOOST_FORCEINLINE //[f1, f2] = freqspace(n)\n void getmn(A0 const &a0, int &m, int& n, bool&, bool&\n , boost::mpl::long_<3> const & //number of inputs\n , boost::mpl::long_<2> const &\n ) const//number of outputs\n {\n typedef typename boost::proto::result_of::child_c::type child1;\n typedef typename boost::proto::result_of::value::type type_t;\n typedef typename meta::is_scalar::type choice_t;\n m = getval(boost::proto::value(boost::proto::child_c<1>(a0)),0,choice_t());\n n = getval(boost::proto::value(boost::proto::child_c<1>(a0)),1,choice_t());\n }\n\n template < class T > static int getval(const T & a0, int,\n const boost::mpl::bool_ &)\n { return a0; }\n\n template < class T > static int getval(const T & a0, int i,\n const boost::mpl::bool_ &)\n {return a0[i]; }\n\n BOOST_FORCEINLINE //[f] = freqspace(n, whole_)\n void getmn( A0 const &a0, int &m, int& n, bool &whole, bool&\n , 
boost::mpl::long_<4> const & //number of inputs\n , boost::mpl::long_<1> const & //number of outputs\n ) const\n {\n m = int(boost::proto::value(boost::proto::child_c<1>(a0)));\n n = 0;\n whole = true;\n }\n\n BOOST_FORCEINLINE //[f,g] = freqspace(n, whole_)\n void getmn( A0 const &a0, int &m, int& n, bool &whole, bool&\n , boost::mpl::long_<4> const & //number of inputs\n , boost::mpl::long_<2> const & //number of outputs\n ) const\n {\n m = int(boost::proto::value(boost::proto::child_c<1>(a0)));\n n = 0;\n whole = true;\n }\n\n template < class Dummy >\n BOOST_FORCEINLINE // [f1, f2] = freqspace([m, n])\n void getmn( A0 const &a0, int &m, int& n, bool&, bool&\n , boost::mpl::long_<3> const & //number of inputs\n , boost::mpl::long_<2> const & //number of outputs\n , Dummy()\n ) const\n {\n typedef typename boost::proto::result_of::child_c::type child1;\n typedef typename boost::proto::result_of::value::type type_t;\n typedef typename meta::is_scalar::type choice_t;\n m = getval(boost::proto::value(boost::proto::child_c<1>(a0)),0,choice_t());\n n = getval(boost::proto::value(boost::proto::child_c<1>(a0)),1,choice_t());\n }\n\n template < class Dummy >\n BOOST_FORCEINLINE // [f1, f2] = freqspace([m, n], meshgrid_)\n void getmn( A0 const &a0, int &m, int& n, bool&, bool& meshgrid\n , boost::mpl::long_<4> const & //number of inputs\n , boost::mpl::long_<2> const & //number of outputs\n , Dummy()\n ) const\n {\n typedef typename boost::proto::result_of::child_c::type child1;\n typedef typename boost::proto::result_of::value::type type_t;\n typedef typename meta::is_scalar::type choice_t;\n m = getval(boost::proto::value(boost::proto::child_c<1>(a0)),0,choice_t());\n n = getval(boost::proto::value(boost::proto::child_c<1>(a0)),1,choice_t());\n meshgrid = true;\n }\n };\n} }\n\n#endif\n", "meta": {"hexsha": "f66589a7e2df3824965e496c97137459586557d2", "size": 7841, "ext": "hpp", "lang": "C++", "max_stars_repo_path": 
"modules/core/generative/include/nt2/core/functions/common/freqspace.hpp", "max_stars_repo_name": "pbrunet/nt2", "max_stars_repo_head_hexsha": "2aeca0f6a315725b335efd5d9dc95d72e10a7fb7", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modules/core/generative/include/nt2/core/functions/common/freqspace.hpp", "max_issues_repo_name": "pbrunet/nt2", "max_issues_repo_head_hexsha": "2aeca0f6a315725b335efd5d9dc95d72e10a7fb7", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/core/generative/include/nt2/core/functions/common/freqspace.hpp", "max_forks_repo_name": "pbrunet/nt2", "max_forks_repo_head_hexsha": "2aeca0f6a315725b335efd5d9dc95d72e10a7fb7", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9304812834, "max_line_length": 81, "alphanum_fraction": 0.5035072057, "num_tokens": 2041, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.743167997235783, "lm_q2_score": 0.6723317057447908, "lm_q1q2_score": 0.49965540723647395}} {"text": "#include \n\n#include \n#include \n\nconstexpr uint32_t crc32(std::string_view str)\n{\n askr::crc<32> crc;\n crc.update(str);\n return crc.checksum();\n}\n\nBOOST_AUTO_TEST_SUITE(CRC)\n\nBOOST_AUTO_TEST_CASE(CompileTimeTest)\n{\n static_assert(crc32(\"123456789\") == 0xCBF43926);\n}\n\nBOOST_AUTO_TEST_SUITE_END()", "meta": {"hexsha": "69dadbf663faf925e4b5cdac0cca87a1882ef986", "size": 365, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "askr/utility/test/crc_unittest.cpp", "max_stars_repo_name": "blackkaiserxjc/askr", "max_stars_repo_head_hexsha": "dd1196b60305f20abeeef4d96de6f0dab13b738e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "askr/utility/test/crc_unittest.cpp", "max_issues_repo_name": "blackkaiserxjc/askr", "max_issues_repo_head_hexsha": "dd1196b60305f20abeeef4d96de6f0dab13b738e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "askr/utility/test/crc_unittest.cpp", "max_forks_repo_name": "blackkaiserxjc/askr", "max_forks_repo_head_hexsha": "dd1196b60305f20abeeef4d96de6f0dab13b738e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.25, "max_line_length": 52, "alphanum_fraction": 0.7369863014, "num_tokens": 100, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7431680086124811, "lm_q2_score": 0.6723316926137811, "lm_q1q2_score": 0.4996554051268425}} {"text": "#include \"include_and_types.cpp\"\n#include \n#include \"my_transitive_closure.cpp\"\n#include \n#include \n#include \n\n//Ad-hoc typedef interator_property_map types\ntypedef boost::iterator_property_map<__gnu_cxx::__normal_iterator >,\n boost::vec_adj_list_vertex_id_map,\n long unsigned int, long unsigned int&> typeVertex;\n\ntypedef boost::iterator_property_map<__gnu_cxx::__normal_iterator**,\n std::vector*> >,\n boost::vec_adj_list_vertex_id_map, std::set*,\n std::set*&> typeSetVertex;\n\nusing namespace std;\n\n\nint main(int, char*[]){\n\n //Reading graph from stdin\n Graph g;\n dynamic_properties dp;\n read_graphml(std::cin, g, dp);\n\n //Graph printing\n std::cout << \"A directed graph:\" << std::endl;\n print_graph(g, get(vertex_index, g));\n std::cout << std::endl;\n\n //Declaration of TransitiveClosure object\n TransitiveClosure transitive_cl(&g);\n transitive_cl.transitive_closure_scc();\n\n //used only to compare results. 
This applies the built-in function of Boost for applying transitive closure\n std::cout << \"BGL\"<\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace boost { namespace simd { namespace ext\n{\n namespace bd = boost::dispatch;\n namespace bs = boost::simd;\n BOOST_DISPATCH_OVERLOAD_IF(is_not_infinite_\n , (typename A0, typename X)\n , (detail::is_native)\n , bd::cpu_\n , bs::pack_, X>\n )\n {\n using result = bs::as_logical_t;\n BOOST_FORCEINLINE result operator()(const A0&) const BOOST_NOEXCEPT\n {\n return bs::True();\n }\n };\n\n BOOST_DISPATCH_OVERLOAD_IF(is_not_infinite_\n , (typename A0, typename X)\n , (detail::is_native)\n , bd::cpu_\n , bs::pack_, X>\n )\n {\n BOOST_FORCEINLINE bs::as_logical_t operator()( const A0& a0) const BOOST_NOEXCEPT\n {\n return is_not_equal(bs::abs(a0),bs::Inf());\n }\n };\n\n} } }\n#endif\n\n", "meta": {"hexsha": "4daeeedac0bed5ceebb7206548036aee9890c29b", "size": 1912, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "third_party/boost/simd/arch/common/simd/function/is_not_infinite.hpp", "max_stars_repo_name": "SylvainCorlay/pythran", "max_stars_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2018-02-25T22:23:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T15:13:12.000Z", "max_issues_repo_path": "third_party/boost/simd/arch/common/simd/function/is_not_infinite.hpp", "max_issues_repo_name": "SylvainCorlay/pythran", "max_issues_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/boost/simd/arch/common/simd/function/is_not_infinite.hpp", "max_forks_repo_name": "SylvainCorlay/pythran", "max_forks_repo_head_hexsha": 
"908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-12-12T12:36:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-10T14:27:07.000Z", "avg_line_length": 33.5438596491, "max_line_length": 100, "alphanum_fraction": 0.5439330544, "num_tokens": 410, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.743167997235783, "lm_q2_score": 0.6723316926137812, "lm_q1q2_score": 0.49965539747792787}} {"text": "/*\nPart of the Fluid Corpus Manipulation Project (http://www.flucoma.org/)\nCopyright 2017-2019 University of Huddersfield.\nLicensed under the BSD-3 License.\nSee license.md file in the project root for full license information.\nThis project has received funding from the European Research Council (ERC)\nunder the European Union\u2019s Horizon 2020 research and innovation programme\n(grant agreement No 725899).\n*/\n\n#pragma once\n\n#include \"../util/ButterworthHPFilter.hpp\"\n#include \"../util/FluidEigenMappings.hpp\"\n#include \"../util/SlideUDFilter.hpp\"\n#include \"../../data/FluidIndex.hpp\"\n#include \"../../data/TensorTypes.hpp\"\n#include \n#include \n\nnamespace fluid {\nnamespace algorithm {\n\nclass EnvelopeGate\n{\n\n using ArrayXd = Eigen::ArrayXd;\n\npublic:\n EnvelopeGate(index maxSize)\n {\n mInputStorage = ArrayXd(maxSize);\n mOutputStorage = ArrayXd(maxSize);\n }\n\n void init(double onThreshold, double offThreshold, double hiPassFreq,\n index minTimeAboveThreshold, index upwardLookupTime,\n index minTimeBelowThreshold, index downwardLookupTime)\n {\n using namespace std;\n\n mMinTimeAboveThreshold = minTimeAboveThreshold;\n mUpwardLookupTime = upwardLookupTime;\n mMinTimeBelowThreshold = minTimeBelowThreshold,\n mDownwardLookupTime = downwardLookupTime;\n mDownwardLatency = max(minTimeBelowThreshold, mDownwardLookupTime);\n mLatency = max(mMinTimeAboveThreshold + mUpwardLookupTime,\n mDownwardLatency);\n if 
(mLatency < 0) mLatency = 1;\n mHiPassFreq = hiPassFreq;\n initFilters(mHiPassFreq);\n double initVal = min(onThreshold, offThreshold) - 1;\n initBuffers(initVal);\n mSlide.init(initVal);\n mInputState = false;\n mOutputState = false;\n mOnStateCount = 0;\n mOffStateCount = 0;\n mEventCount = 0;\n mSilenceCount = 0;\n mInitialized = true;\n }\n\n double processSample(const double in, double onThreshold, double offThreshold,\n index rampUpTime, index rampDownTime, double hiPassFreq,\n index minEventDuration, index minSilenceDuration)\n {\n using namespace std;\n assert(mInitialized);\n\n mSlide.updateCoeffs(rampUpTime, rampDownTime);\n\n double filtered = in;\n if (hiPassFreq != mHiPassFreq)\n {\n initFilters(hiPassFreq);\n mHiPassFreq = hiPassFreq;\n }\n if (mHiPassFreq > 0)\n filtered = mHiPass2.processSample(mHiPass1.processSample(in));\n\n double rectified = abs(filtered);\n double dB = 20 * log10(rectified);\n double floor = min(offThreshold, onThreshold) - 1;\n double clipped = max(dB, floor);\n double smoothed = mSlide.processSample(clipped);\n bool forcedState = false;\n\n // case 1: we are waiting for event to finish\n if (mOutputState && mEventCount > 0)\n {\n if (mEventCount >= minEventDuration) { mEventCount = 0; }\n else\n {\n forcedState = true;\n mOutputBuffer(mLatency - 1) = 1;\n mEventCount++;\n }\n // case 2: we are waiting for silence to finish\n }\n else if (!mOutputState && mSilenceCount > 0)\n {\n if (mSilenceCount >= minSilenceDuration) { mSilenceCount = 0; }\n else\n {\n forcedState = true;\n mOutputBuffer(mLatency - 1) = 0;\n mSilenceCount++;\n }\n }\n // case 3: need to compute state\n if (!forcedState)\n {\n bool nextState = mInputState;\n if (!mInputState && smoothed >= onThreshold) { nextState = true; }\n if (mInputState && smoothed <= offThreshold) { nextState = false; }\n updateCounters(nextState);\n // establish and refine\n if (!mOutputState && mOnStateCount >= mMinTimeAboveThreshold &&\n mFillCount >= mLatency)\n {\n index 
onsetIndex =\n refineStart(mLatency - mMinTimeAboveThreshold - mUpwardLookupTime,\n mUpwardLookupTime);\n mOutputBuffer.segment(onsetIndex, mLatency - onsetIndex) = 1;\n mEventCount = mOnStateCount;\n mOutputState = true; // we are officially on\n }\n else if (mOutputState && mOffStateCount >= mDownwardLatency &&\n mFillCount >= mLatency)\n {\n\n index offsetIndex =\n refineStart(mLatency - mDownwardLatency, mDownwardLookupTime);\n mOutputBuffer.segment(offsetIndex, mLatency - offsetIndex) = 0;\n mSilenceCount = mOffStateCount;\n mOutputState = false; // we are officially off\n }\n\n mOutputBuffer(mLatency - 1) = mOutputState ? 1 : 0;\n mInputState = nextState;\n }\n if (mLatency > 1)\n {\n mOutputBuffer.segment(0, mLatency - 1) =\n mOutputBuffer.segment(1, mLatency - 1);\n\n mInputBuffer.segment(0, mLatency - 1) =\n mInputBuffer.segment(1, mLatency - 1);\n }\n mInputBuffer(mLatency - 1) = smoothed;\n if (mFillCount < mLatency) mFillCount++;\n return mOutputBuffer(0);\n }\n\n index getLatency() { return mLatency; }\n bool initialized() { return mInitialized; }\n\n\nprivate:\n void initBuffers(double initialValue)\n {\n using namespace std;\n mInputBuffer = mInputStorage.segment(0, max(mLatency, 1))\n .setConstant(initialValue);\n mOutputBuffer =\n mOutputStorage.segment(0, max(mLatency, 1)).setZero();\n mInputState = false;\n mOutputState = false;\n mFillCount = max(mLatency, 1);\n }\n\n void initFilters(double cutoff)\n {\n mHiPass1.init(cutoff);\n mHiPass2.init(cutoff);\n }\n\n index refineStart(index start, index nSamples)\n {\n if (nSamples < 2) return start + nSamples;\n ArrayXd seg = mInputBuffer.segment(start, nSamples);\n ArrayXd::Index index;\n seg.minCoeff(&index);\n return start + index;\n }\n\n void updateCounters(bool nextState)\n {\n if (!mInputState && nextState)\n {\n mOffStateCount = 0;\n mOnStateCount = 1;\n }\n else if (mInputState && !nextState)\n {\n mOnStateCount = 0;\n mOffStateCount = 1;\n }\n else if (mInputState && nextState)\n {\n 
mOnStateCount++;\n }\n else if (!mInputState && !nextState)\n {\n mOffStateCount++;\n }\n }\n\n index mLatency;\n index mFillCount;\n double mHiPassFreq{0};\n\n index mMinTimeAboveThreshold{440};\n index mDownwardLookupTime{10};\n index mDownwardLatency;\n index mMinTimeBelowThreshold{10};\n index mUpwardLookupTime{24};\n\n ArrayXd mInputBuffer;\n ArrayXd mOutputBuffer;\n ArrayXd mInputStorage;\n ArrayXd mOutputStorage;\n\n bool mInputState{false};\n bool mOutputState{false};\n\n index mOnStateCount{0};\n index mOffStateCount{0};\n index mEventCount{0};\n index mSilenceCount{0};\n bool mInitialized{false};\n\n ButterworthHPFilter mHiPass1;\n ButterworthHPFilter mHiPass2;\n SlideUDFilter mSlide;\n};\n} // namespace algorithm\n} // namespace fluid\n", "meta": {"hexsha": "3259a0421270b92a151d4979b05d6f4101c4e4da", "size": 6763, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/algorithms/public/EnvelopeGate.hpp", "max_stars_repo_name": "elgiano/flucoma-core", "max_stars_repo_head_hexsha": "d34a04e7a68f24eaf09b24df57020d45664061fc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/algorithms/public/EnvelopeGate.hpp", "max_issues_repo_name": "elgiano/flucoma-core", "max_issues_repo_head_hexsha": "d34a04e7a68f24eaf09b24df57020d45664061fc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2022-03-15T10:39:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T13:19:22.000Z", "max_forks_repo_path": "include/algorithms/public/EnvelopeGate.hpp", "max_forks_repo_name": "elgiano/flucoma-core", "max_forks_repo_head_hexsha": "d34a04e7a68f24eaf09b24df57020d45664061fc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, 
"avg_line_length": 28.1791666667, "max_line_length": 80, "alphanum_fraction": 0.6596185125, "num_tokens": 1809, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8539127529517043, "lm_q2_score": 0.5851011542032312, "lm_q1q2_score": 0.4996253373409008}} {"text": "// Copyright 2019, Collabora, Ltd.\n// SPDX-License-Identifier: BSL-1.0\n/*!\n * @file\n * @brief C++ sensor fusion/filtering code that uses flexkalman\n * @author Ryan Pavlik \n * @ingroup aux_tracking\n */\n\n#pragma once\n\n#ifndef __cplusplus\n#error \"This header is C++-only.\"\n#endif\n\n#include \n#include \n\n#include \"flexkalman/AugmentedProcessModel.h\"\n#include \"flexkalman/AugmentedState.h\"\n#include \"flexkalman/BaseTypes.h\"\n#include \"flexkalman/PoseState.h\"\n\n\nnamespace xrt::auxiliary::tracking {\n\nnamespace types = flexkalman::types;\nusing flexkalman::types::Vector;\n\n//! For things like accelerometers, which on some level measure the local vector\n//! of a world direction.\ntemplate \nclass WorldDirectionMeasurement : public flexkalman::MeasurementBase>\n{\npublic:\n\tEIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\tstatic constexpr size_t Dimension = 3;\n\tusing MeasurementVector = types::Vector;\n\tusing MeasurementSquareMatrix = types::SquareMatrix;\n\tWorldDirectionMeasurement(types::Vector<3> const &direction,\n\t types::Vector<3> const &reference,\n\t types::Vector<3> const &variance)\n\t : direction_(direction.normalized()), reference_(reference.normalized()), covariance_(variance.asDiagonal())\n\t{}\n\n\tMeasurementSquareMatrix const &\n\tgetCovariance(State const & /*s*/)\n\t{\n\t\treturn covariance_;\n\t}\n\n\ttypes::Vector<3>\n\tpredictMeasurement(State const &s) const\n\t{\n\t\treturn s.getCombinedQuaternion() * reference_;\n\t}\n\n\tMeasurementVector\n\tgetResidual(MeasurementVector const &predictedMeasurement, State const &s) const\n\t{\n\t\treturn predictedMeasurement - reference_;\n\t}\n\n\tMeasurementVector\n\tgetResidual(State const &s) 
const\n\t{\n\t\treturn getResidual(predictMeasurement(s), s);\n\t}\n\nprivate:\n\ttypes::Vector<3> direction_;\n\ttypes::Vector<3> reference_;\n\tMeasurementSquareMatrix covariance_;\n};\n#if 0\n//! For things like accelerometers, which on some level measure the local vector\n//! of a world direction.\nclass LinAccelWithGravityMeasurement\n : public flexkalman::MeasurementBase\n{\npublic:\n\tEIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\tstatic constexpr size_t Dimension = 3;\n\tusing MeasurementVector = types::Vector;\n\tusing MeasurementSquareMatrix = types::SquareMatrix;\n\tLinAccelWithGravityMeasurement(types::Vector<3> const &direction,\n\t types::Vector<3> const &reference,\n\t types::Vector<3> const &variance)\n\t : direction_(direction), reference_(reference),\n\t covariance_(variance.asDiagonal())\n\t{}\n\n\t// template \n\tMeasurementSquareMatrix const &\n\tgetCovariance(State const & /*s*/)\n\t{\n\t\treturn covariance_;\n\t}\n\n\t// template \n\ttypes::Vector<3>\n\tpredictMeasurement(State const &s) const\n\t{\n\t\treturn reference_;\n\t}\n\n\t// template \n\tMeasurementVector\n\tgetResidual(MeasurementVector const &predictedMeasurement,\n\t State const &s) const\n\t{\n\t\ts.getQuaternion().conjugate() *\n\t\t predictedMeasurement return predictedMeasurement -\n\t\t reference_.normalized();\n\t}\n\n\ttemplate \n\tMeasurementVector\n\tgetResidual(State const &s) const\n\t{\n\t\tMeasurementVector residual =\n\t\t direction_ - reference_ * s.getQuaternion();\n\t\treturn getResidual(predictMeasurement(s), s);\n\t}\n\nprivate:\n\ttypes::Vector<3> direction_;\n\ttypes::Vector<3> reference_;\n\tMeasurementSquareMatrix covariance_;\n};\n#endif\n\nclass BiasedGyroMeasurement : public flexkalman::MeasurementBase\n{\npublic:\n\tEIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\tstatic constexpr size_t Dimension = 3;\n\tusing MeasurementVector = types::Vector;\n\tusing MeasurementSquareMatrix = types::SquareMatrix;\n\tBiasedGyroMeasurement(types::Vector<3> const &angVel, types::Vector<3> const 
&variance)\n\t : angVel_(angVel), covariance_(variance.asDiagonal())\n\t{}\n\n\ttemplate \n\tMeasurementSquareMatrix const &\n\tgetCovariance(State const & /*s*/)\n\t{\n\t\treturn covariance_;\n\t}\n\n\ttemplate \n\ttypes::Vector<3>\n\tpredictMeasurement(State const &s) const\n\t{\n\t\treturn s.b().stateVector() + angVel_;\n\t}\n\n\ttemplate \n\tMeasurementVector\n\tgetResidual(MeasurementVector const &predictedMeasurement, State const &s) const\n\t{\n\t\treturn predictedMeasurement - s.a().angularVelocity();\n\t}\n\n\ttemplate \n\tMeasurementVector\n\tgetResidual(State const &s) const\n\t{\n\t\treturn getResidual(predictMeasurement(s), s);\n\t}\n\nprivate:\n\ttypes::Vector<3> angVel_;\n\tMeasurementSquareMatrix covariance_;\n};\n/*!\n * For PS Move-like things, where there's a directly-computed absolute position\n * that is not at the tracked body's origin.\n */\nclass AbsolutePositionLeverArmMeasurement : public flexkalman::MeasurementBase\n{\npublic:\n\tEIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\tusing State = flexkalman::pose_externalized_rotation::State;\n\tstatic constexpr size_t Dimension = 3;\n\tusing MeasurementVector = types::Vector;\n\tusing MeasurementSquareMatrix = types::SquareMatrix;\n\n\t/*!\n\t * @todo the point we get from the camera isn't the center of the ball,\n\t * but the center of the visible surface of the ball - a closer\n\t * approximation would be translation along the vector to the center of\n\t * projection....\n\t */\n\tAbsolutePositionLeverArmMeasurement(MeasurementVector const &measurement,\n\t MeasurementVector const &knownLocationInBodySpace,\n\t MeasurementVector const &variance)\n\t : measurement_(measurement), knownLocationInBodySpace_(knownLocationInBodySpace),\n\t covariance_(variance.asDiagonal())\n\t{}\n\n\tMeasurementSquareMatrix const &\n\tgetCovariance(State const & /*s*/)\n\t{\n\t\treturn covariance_;\n\t}\n\n\ttypes::Vector<3>\n\tpredictMeasurement(State const &s) const\n\t{\n\t\treturn s.getIsometry() * 
knownLocationInBodySpace_;\n\t}\n\n\tMeasurementVector\n\tgetResidual(MeasurementVector const &predictedMeasurement, State const & /*s*/) const\n\t{\n\t\treturn measurement_ - predictedMeasurement;\n\t}\n\n\tMeasurementVector\n\tgetResidual(State const &s) const\n\t{\n\t\treturn getResidual(predictMeasurement(s), s);\n\t}\n\nprivate:\n\tMeasurementVector measurement_;\n\tMeasurementVector knownLocationInBodySpace_;\n\tMeasurementSquareMatrix covariance_;\n};\n\n} // namespace xrt::auxiliary::tracking\n", "meta": {"hexsha": "ffb9c9e0f1d8ad7de045fccc762950fb92aa1689", "size": 6403, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/xrt/auxiliary/tracking/t_fusion.hpp", "max_stars_repo_name": "leviathanch/monado", "max_stars_repo_head_hexsha": "36a540a764fd5529018dfceb28e10804db9596bf", "max_stars_repo_licenses": ["Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "BSL-1.0", "BSD-3-Clause"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-11-08T05:17:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T12:50:59.000Z", "max_issues_repo_path": "src/xrt/auxiliary/tracking/t_fusion.hpp", "max_issues_repo_name": "SimulaVR/monado", "max_issues_repo_head_hexsha": "b5d46eebf5f9b7f96a52639484a1b35d8ab3cd21", "max_issues_repo_licenses": ["Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "BSL-1.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/xrt/auxiliary/tracking/t_fusion.hpp", "max_forks_repo_name": "SimulaVR/monado", "max_forks_repo_head_hexsha": "b5d46eebf5f9b7f96a52639484a1b35d8ab3cd21", "max_forks_repo_licenses": ["Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "BSL-1.0", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4806866953, "max_line_length": 115, "alphanum_fraction": 0.7441824145, 
"num_tokens": 1468, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.85391273808085, "lm_q2_score": 0.5851011542032312, "lm_q1q2_score": 0.49962532863994674}} {"text": "#include \n", "meta": {"hexsha": "fc837f579029a6736ec1992e7691f5d034d70fc8", "size": 54, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_math_special_functions_ellint_rg.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_math_special_functions_ellint_rg.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_math_special_functions_ellint_rg.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 27.0, "max_line_length": 53, "alphanum_fraction": 0.8333333333, "num_tokens": 13, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8221891479496521, "lm_q2_score": 0.6076631698328916, "lm_q1q2_score": 0.4996140638452899}} {"text": "/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n/*\n Copyright (C) 2006, 2007, 2015 Ferdinando Ametrano\n Copyright (C) 2006 Cristina Duminuco\n Copyright (C) 2007 Giorgio Facchinetti\n Copyright (C) 2015 Paolo Mazzocchi\n\n This file is part of QuantLib, a free-software/open-source library\n for financial quantitative analysts and developers - http://quantlib.org/\n\n QuantLib is free software: you can redistribute it and/or modify it\n under the terms of the QuantLib license. You should have received a\n copy of the license along with this program; if not, please email\n . The license is also available online at\n .\n\n This program is distributed in the hope that it will be useful, but WITHOUT\n ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n FOR A PARTICULAR PURPOSE. See the license for more details.\n*/\n\n#ifndef quantlib_abcdcalibration_hpp\n#define quantlib_abcdcalibration_hpp\n\n\n#include \n#include \n#include \n\n#include \n\n#include \n\n\nnamespace QuantLib {\n \n class Quote;\n class OptimizationMethod;\n class ParametersTransformation;\n\n class AbcdCalibration {\n private:\n class AbcdError : public CostFunction {\n public:\n AbcdError(AbcdCalibration* abcd) : abcd_(abcd) {}\n\n Real value(const Array& x) const {\n const Array y = abcd_->transformation_->direct(x);\n abcd_->a_ = y[0];\n abcd_->b_ = y[1];\n abcd_->c_ = y[2];\n abcd_->d_ = y[3];\n return abcd_->error();\n }\n Disposable values(const Array& x) const {\n const Array y = abcd_->transformation_->direct(x);\n abcd_->a_ = y[0];\n abcd_->b_ = y[1];\n abcd_->c_ = y[2];\n abcd_->d_ = y[3];\n return abcd_->errors();\n }\n private:\n AbcdCalibration* abcd_;\n };\n\n class AbcdParametersTransformation : public ParametersTransformation {\n public:\n AbcdParametersTransformation() : y_(Array(4)) {}\n // to 
constrained <- from unconstrained\n Array direct(const Array& x) const;\n // to unconstrained <- from constrained\n Array inverse(const Array& x) const;\n private:\n mutable Array y_;\n };\n\n public:\n AbcdCalibration() {};\n AbcdCalibration(\n const std::vector& t,\n const std::vector& blackVols,\n Real aGuess = -0.06,\n Real bGuess = 0.17,\n Real cGuess = 0.54,\n Real dGuess = 0.17,\n bool aIsFixed = false,\n bool bIsFixed = false,\n bool cIsFixed = false,\n bool dIsFixed = false,\n bool vegaWeighted = false,\n const boost::shared_ptr& endCriteria\n = boost::shared_ptr(),\n const boost::shared_ptr& method\n = boost::shared_ptr());\n //! adjustment factors needed to match Black vols\n std::vector k(const std::vector& t,\n const std::vector& blackVols) const;\n void compute();\n //calibration results\n Real value(Real x) const;\n Real error() const;\n Real maxError() const;\n Disposable errors() const;\n EndCriteria::Type endCriteria() const;\n Real a() const { return a_; }\n Real b() const { return b_; }\n Real c() const { return c_; }\n Real d() const { return d_; }\n bool aIsFixed_, bIsFixed_, cIsFixed_, dIsFixed_;\n Real a_, b_, c_, d_;\n boost::shared_ptr transformation_;\n private:\n // optimization method used for fitting\n mutable EndCriteria::Type abcdEndCriteria_;\n boost::shared_ptr endCriteria_;\n boost::shared_ptr optMethod_;\n mutable std::vector weights_;\n bool vegaWeighted_;\n //! 
Parameters\n std::vector times_, blackVols_;\n };\n\n}\n\n/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n/*\n Copyright (C) 2006, 2015 Ferdinando Ametrano\n Copyright (C) 2006 Cristina Duminuco\n Copyright (C) 2005, 2006 Klaus Spanderen\n Copyright (C) 2007 Giorgio Facchinetti\n Copyright (C) 2015 Paolo Mazzocchi\n\n This file is part of QuantLib, a free-software/open-source library\n for financial quantitative analysts and developers - http://quantlib.org/\n\n QuantLib is free software: you can redistribute it and/or modify it\n under the terms of the QuantLib license. You should have received a\n copy of the license along with this program; if not, please email\n . The license is also available online at\n .\n\n This program is distributed in the hope that it will be useful, but WITHOUT\n ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n FOR A PARTICULAR PURPOSE. See the license for more details.\n*/\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace QuantLib {\n\n // to constrained <- from unconstrained\n inline Array AbcdCalibration::AbcdParametersTransformation::direct(const Array& x) const {\n y_[1] = x[1];\n y_[2] = std::exp(x[2]);\n y_[3] = std::exp(x[3]);\n y_[0] = std::exp(x[0]) - y_[3];\n return y_;\n }\n\n // to unconstrained <- from constrained\n inline Array AbcdCalibration::AbcdParametersTransformation::inverse(const Array& x) const {\n y_[1] = x[1];\n y_[2] = std::log(x[2]);\n y_[3] = std::log(x[3]);\n y_[0] = std::log(x[0] + x[3]);\n return y_;\n }\n\n // to constrained <- from unconstrained\n\n inline AbcdCalibration::AbcdCalibration(\n const std::vector& t,\n const std::vector& blackVols,\n Real a, Real b, Real c, Real d,\n bool aIsFixed, bool bIsFixed, bool cIsFixed, bool dIsFixed,\n bool vegaWeighted,\n const boost::shared_ptr& endCriteria,\n const boost::shared_ptr& optMethod)\n : aIsFixed_(aIsFixed), bIsFixed_(bIsFixed),\n 
cIsFixed_(cIsFixed), dIsFixed_(dIsFixed),\n a_(a), b_(b), c_(c), d_(d),\n abcdEndCriteria_(EndCriteria::None), endCriteria_(endCriteria),\n optMethod_(optMethod), weights_(blackVols.size(), 1.0/blackVols.size()),\n vegaWeighted_(vegaWeighted),\n times_(t), blackVols_(blackVols) {\n\n AbcdMathFunction::validate(a, b, c, d);\n\n QL_REQUIRE(blackVols.size()==t.size(),\n \"mismatch between number of times (\" << t.size() <<\n \") and blackVols (\" << blackVols.size() << \")\");\n\n // if no optimization method or endCriteria is provided, we provide one\n if (!optMethod_) {\n Real epsfcn = 1.0e-8;\n Real xtol = 1.0e-8;\n Real gtol = 1.0e-8;\n bool useCostFunctionsJacobian = false;\n optMethod_ = boost::shared_ptr(new\n LevenbergMarquardt(epsfcn, xtol, gtol, useCostFunctionsJacobian));\n }\n if (!endCriteria_) {\n Size maxIterations = 10000;\n Size maxStationaryStateIterations = 1000;\n Real rootEpsilon = 1.0e-8;\n Real functionEpsilon = 0.3e-4; // Why 0.3e-4 ?\n Real gradientNormEpsilon = 0.3e-4; // Why 0.3e-4 ?\n endCriteria_ = boost::shared_ptr(new\n EndCriteria(maxIterations, maxStationaryStateIterations,\n rootEpsilon, functionEpsilon, gradientNormEpsilon));\n }\n }\n\n inline void AbcdCalibration::compute() {\n if (vegaWeighted_) {\n Real weightsSum = 0.0;\n for (Size i=0; i(new\n AbcdParametersTransformation);\n\n Array guess(4);\n guess[0] = a_;\n guess[1] = b_;\n guess[2] = c_;\n guess[3] = d_;\n\n std::vector parameterAreFixed(4);\n parameterAreFixed[0] = aIsFixed_;\n parameterAreFixed[1] = bIsFixed_;\n parameterAreFixed[2] = cIsFixed_;\n parameterAreFixed[3] = dIsFixed_;\n\n Array inversedTransformatedGuess(transformation_->inverse(guess));\n\n ProjectedCostFunction projectedAbcdCostFunction(costFunction,\n inversedTransformatedGuess, parameterAreFixed);\n\n Array projectedGuess\n (projectedAbcdCostFunction.project(inversedTransformatedGuess));\n\n NoConstraint constraint;\n Problem problem(projectedAbcdCostFunction, constraint, projectedGuess);\n 
abcdEndCriteria_ = optMethod_->minimize(problem, *endCriteria_);\n Array projectedResult(problem.currentValue());\n Array transfResult(projectedAbcdCostFunction.include(projectedResult));\n\n Array result = transformation_->direct(transfResult);\n AbcdMathFunction::validate(a_, b_, c_, d_);\n a_ = result[0];\n b_ = result[1];\n c_ = result[2];\n d_ = result[3];\n\n }\n }\n\n inline Real AbcdCalibration::value(Real x) const {\n return abcdBlackVolatility(x,a_,b_,c_,d_);\n }\n\n inline std::vector AbcdCalibration::k(const std::vector& t,\n const std::vector& blackVols) const {\n QL_REQUIRE(blackVols.size()==t.size(),\n \"mismatch between number of times (\" << t.size() <<\n \") and blackVols (\" << blackVols.size() << \")\");\n std::vector k(t.size());\n for (Size i=0; i AbcdCalibration::errors() const {\n Array results(times_.size());\n for (Size i=0; i\r\n#endif\r\n#ifndef BOOST_MATH_LOG1P_INCLUDED\r\n# include \r\n#endif\r\n#include \r\n\r\n#ifdef BOOST_NO_STDC_NAMESPACE\r\nnamespace std{ using ::sqrt; using ::fabs; using ::acos; using ::asin; using ::atan; using ::atan2; }\r\n#endif\r\n\r\nnamespace boost{ namespace math{\r\n\r\ntemplate \r\ninline std::complex asin(const std::complex& z)\r\n{\r\n //\r\n // This implementation is a transcription of the pseudo-code in:\r\n //\r\n // \"Implementing the complex Arcsine and Arccosine Functions using Exception Handling.\"\r\n // T E Hull, Thomas F Fairgrieve and Ping Tak Peter Tang.\r\n // ACM Transactions on Mathematical Software, Vol 23, No 3, Sept 1997.\r\n //\r\n\r\n //\r\n // These static constants should really be in a maths constants library:\r\n //\r\n static const T one = static_cast(1);\r\n //static const T two = static_cast(2);\r\n static const T half = static_cast(0.5L);\r\n static const T a_crossover = static_cast(1.5L);\r\n static const T b_crossover = static_cast(0.6417L);\r\n static const T s_pi = boost::math::constants::pi();\r\n static const T half_pi = s_pi / 2;\r\n static const T log_two = 
boost::math::constants::ln_two();\r\n static const T quarter_pi = s_pi / 4;\r\n#ifdef BOOST_MSVC\r\n#pragma warning(push)\r\n#pragma warning(disable:4127)\r\n#endif\r\n //\r\n // Get real and imaginary parts, discard the signs as we can \r\n // figure out the sign of the result later:\r\n //\r\n T x = std::fabs(z.real());\r\n T y = std::fabs(z.imag());\r\n T real, imag; // our results\r\n\r\n //\r\n // Begin by handling the special cases for infinities and nan's\r\n // specified in C99, most of this is handled by the regular logic\r\n // below, but handling it as a special case prevents overflow/underflow\r\n // arithmetic which may trip up some machines:\r\n //\r\n if((boost::math::isnan)(x))\r\n {\r\n if((boost::math::isnan)(y))\r\n return std::complex(x, x);\r\n if((boost::math::isinf)(y))\r\n {\r\n real = x;\r\n imag = std::numeric_limits::infinity();\r\n }\r\n else\r\n return std::complex(x, x);\r\n }\r\n else if((boost::math::isnan)(y))\r\n {\r\n if(x == 0)\r\n {\r\n real = 0;\r\n imag = y;\r\n }\r\n else if((boost::math::isinf)(x))\r\n {\r\n real = y;\r\n imag = std::numeric_limits::infinity();\r\n }\r\n else\r\n return std::complex(y, y);\r\n }\r\n else if((boost::math::isinf)(x))\r\n {\r\n if((boost::math::isinf)(y))\r\n {\r\n real = quarter_pi;\r\n imag = std::numeric_limits::infinity();\r\n }\r\n else\r\n {\r\n real = half_pi;\r\n imag = std::numeric_limits::infinity();\r\n }\r\n }\r\n else if((boost::math::isinf)(y))\r\n {\r\n real = 0;\r\n imag = std::numeric_limits::infinity();\r\n }\r\n else\r\n {\r\n //\r\n // special case for real numbers:\r\n //\r\n if((y == 0) && (x <= one))\r\n return std::complex(std::asin(z.real()), z.imag());\r\n //\r\n // Figure out if our input is within the \"safe area\" identified by Hull et al.\r\n // This would be more efficient with portable floating point exception handling;\r\n // fortunately the quantities M and u identified by Hull et al (figure 3), \r\n // match with the max and min methods of numeric_limits.\r\n 
//\r\n T safe_max = detail::safe_max(static_cast(8));\r\n T safe_min = detail::safe_min(static_cast(4));\r\n\r\n T xp1 = one + x;\r\n T xm1 = x - one;\r\n\r\n if((x < safe_max) && (x > safe_min) && (y < safe_max) && (y > safe_min))\r\n {\r\n T yy = y * y;\r\n T r = std::sqrt(xp1*xp1 + yy);\r\n T s = std::sqrt(xm1*xm1 + yy);\r\n T a = half * (r + s);\r\n T b = x / a;\r\n\r\n if(b <= b_crossover)\r\n {\r\n real = std::asin(b);\r\n }\r\n else\r\n {\r\n T apx = a + x;\r\n if(x <= one)\r\n {\r\n real = std::atan(x/std::sqrt(half * apx * (yy /(r + xp1) + (s-xm1))));\r\n }\r\n else\r\n {\r\n real = std::atan(x/(y * std::sqrt(half * (apx/(r + xp1) + apx/(s+xm1)))));\r\n }\r\n }\r\n\r\n if(a <= a_crossover)\r\n {\r\n T am1;\r\n if(x < one)\r\n {\r\n am1 = half * (yy/(r + xp1) + yy/(s - xm1));\r\n }\r\n else\r\n {\r\n am1 = half * (yy/(r + xp1) + (s + xm1));\r\n }\r\n imag = boost::math::log1p(am1 + std::sqrt(am1 * (a + one)));\r\n }\r\n else\r\n {\r\n imag = std::log(a + std::sqrt(a*a - one));\r\n }\r\n }\r\n else\r\n {\r\n //\r\n // This is the Hull et al exception handling code from Fig 3 of their paper:\r\n //\r\n if(y <= (std::numeric_limits::epsilon() * std::fabs(xm1)))\r\n {\r\n if(x < one)\r\n {\r\n real = std::asin(x);\r\n imag = y / std::sqrt(-xp1*xm1);\r\n }\r\n else\r\n {\r\n real = half_pi;\r\n if(((std::numeric_limits::max)() / xp1) > xm1)\r\n {\r\n // xp1 * xm1 won't overflow:\r\n imag = boost::math::log1p(xm1 + std::sqrt(xp1*xm1));\r\n }\r\n else\r\n {\r\n imag = log_two + std::log(x);\r\n }\r\n }\r\n }\r\n else if(y <= safe_min)\r\n {\r\n // There is an assumption in Hull et al's analysis that\r\n // if we get here then x == 1. 
This is true for all \"good\"\r\n // machines where :\r\n // \r\n // E^2 > 8*sqrt(u); with:\r\n //\r\n // E = std::numeric_limits::epsilon()\r\n // u = (std::numeric_limits::min)()\r\n //\r\n // Hull et al provide alternative code for \"bad\" machines\r\n // but we have no way to test that here, so for now just assert\r\n // on the assumption:\r\n //\r\n BOOST_ASSERT(x == 1);\r\n real = half_pi - std::sqrt(y);\r\n imag = std::sqrt(y);\r\n }\r\n else if(std::numeric_limits::epsilon() * y - one >= x)\r\n {\r\n real = x/y; // This can underflow!\r\n imag = log_two + std::log(y);\r\n }\r\n else if(x > one)\r\n {\r\n real = std::atan(x/y);\r\n T xoy = x/y;\r\n imag = log_two + std::log(y) + half * boost::math::log1p(xoy*xoy);\r\n }\r\n else\r\n {\r\n T a = std::sqrt(one + y*y);\r\n real = x/a; // This can underflow!\r\n imag = half * boost::math::log1p(static_cast(2)*y*(y+a));\r\n }\r\n }\r\n }\r\n\r\n //\r\n // Finish off by working out the sign of the result:\r\n //\r\n if((boost::math::signbit)(z.real()))\r\n real = (boost::math::changesign)(real);\r\n if((boost::math::signbit)(z.imag()))\r\n imag = (boost::math::changesign)(imag);\r\n\r\n return std::complex(real, imag);\r\n#ifdef BOOST_MSVC\r\n#pragma warning(pop)\r\n#endif\r\n}\r\n\r\n} } // namespaces\r\n\r\n#endif // BOOST_MATH_COMPLEX_ASIN_INCLUDED\r\n", "meta": {"hexsha": "4b1e0f8ef04f9bbe1b7ceecdb43de02801a5b310", "size": 7606, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "master/core/third/boost/math/complex/asin.hpp", "max_stars_repo_name": "importlib/klib", "max_stars_repo_head_hexsha": "a59837857689d0e60d3df6d2ebd12c3160efa794", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 198.0, "max_stars_repo_stars_event_min_datetime": "2015-01-13T05:47:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T04:46:46.000Z", "max_issues_repo_path": "master/core/third/boost/math/complex/asin.hpp", "max_issues_repo_name": "isuhao/klib", "max_issues_repo_head_hexsha": 
"a59837857689d0e60d3df6d2ebd12c3160efa794", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 197.0, "max_issues_repo_issues_event_min_datetime": "2017-07-06T16:53:59.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-31T17:57:51.000Z", "max_forks_repo_path": "master/core/third/boost/math/complex/asin.hpp", "max_forks_repo_name": "isuhao/klib", "max_forks_repo_head_hexsha": "a59837857689d0e60d3df6d2ebd12c3160efa794", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 139.0, "max_forks_repo_forks_event_min_datetime": "2015-01-15T20:09:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T15:21:16.000Z", "avg_line_length": 30.1825396825, "max_line_length": 102, "alphanum_fraction": 0.4884301867, "num_tokens": 1945, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8221891392358015, "lm_q2_score": 0.6076631698328916, "lm_q1q2_score": 0.4996140585502038}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace std;\nusing namespace Eigen;\nusing namespace cv;\n\ntypedef vector> VecVector2d;\n\n// Camera intrinsics\n// \u5185\u53c2\ndouble fx = 718.856, fy = 718.856, cx = 607.1928, cy = 185.2157;\n// \u57fa\u7ebf\ndouble baseline = 0.573;\n// paths\nstring left_file = \"../left.png\";\nstring disparity_file = \"../disparity.png\";\nboost::format fmt_others(\"../%06d.png\"); // other files\n\n// useful typedefs\ntypedef Eigen::Matrix Matrix6d;\ntypedef Eigen::Matrix Matrix26d;\ntypedef Eigen::Matrix Vector6d;\n\n// TODO implement this function\n/**\n * pose estimation using direct method\n * @param img1\n * @param img2\n * @param px_ref\n * @param depth_ref\n * @param T21\n */\nvoid DirectPoseEstimationMultiLayer(\n const cv::Mat &img1,\n const cv::Mat &img2,\n const VecVector2d &px_ref,\n const vector depth_ref,\n Sophus::SE3 &T21\n);\n\n// TODO implement this function\n/**\n * pose estimation using direct method\n * @param img1\n * @param img2\n * @param px_ref\n * 
@param depth_ref\n * @param T21\n */\nvoid DirectPoseEstimationSingleLayer(\n const cv::Mat &img1,\n const cv::Mat &img2,\n const VecVector2d &px_ref,\n const vector depth_ref,\n Sophus::SE3 &T21\n);\n\n// bilinear interpolation\ninline float GetPixelValue(const cv::Mat &img, float x, float y) {\n uchar *data = &img.data[int(y) * img.step + int(x)];\n float xx = x - floor(x);\n float yy = y - floor(y);\n return float(\n (1 - xx) * (1 - yy) * data[0] +\n xx * (1 - yy) * data[1] +\n (1 - xx) * yy * data[img.step] +\n xx * yy * data[img.step + 1]\n );\n}\n\ninline bool OutOfImg(float u, float v, int col, int row){\n\tif(u >= 0 && u < col && v >= 0 && v < row)\n\t\treturn false;\n\telse{\n\t\treturn true;\n\t}\n}\n\nint main(int argc, char **argv) {\n\n cv::Mat left_img = cv::imread(left_file, 0);\n cv::Mat disparity_img = cv::imread(disparity_file, 0);\n\n // let's randomly pick pixels in the first image and generate some 3d points in the first image's frame\n cv::RNG rng;\n int nPoints = 1000;\n int boarder = 40;\n VecVector2d pixels_ref;\n vector depth_ref;\n\n // generate pixels in ref and load depth data\n for (int i = 0; i < nPoints; i++) {\n int x = rng.uniform(boarder, left_img.cols - boarder); // don't pick pixels close to boarder\n int y = rng.uniform(boarder, left_img.rows - boarder); // don't pick pixels close to boarder\n int disparity = disparity_img.at(y, x);\n double depth = fx * baseline / disparity; // you know this is disparity to depth\n depth_ref.push_back(depth);\n pixels_ref.push_back(Eigen::Vector2d(x, y));\n }\n\n // estimates 01~05.png's pose using this information\n Sophus::SE3 T_cur_ref;\n\n for (int i = 1; i < 6; i++) { // 1~10\n cv::Mat img = cv::imread((fmt_others % i).str(), 0);\n // DirectPoseEstimationSingleLayer(left_img, img, pixels_ref, depth_ref, T_cur_ref); // first you need to test single layer\n DirectPoseEstimationMultiLayer(left_img, img, pixels_ref, depth_ref, T_cur_ref);\n }\n}\n\nvoid DirectPoseEstimationSingleLayer(\n 
const cv::Mat &img1,\n const cv::Mat &img2,\n const VecVector2d &px_ref,\n const vector depth_ref,\n Sophus::SE3 &T21\n) {\n\n // parameters\n int half_patch_size = 4;\n int iterations = 100;\n\n double cost = 0, lastCost = 0;\n int nGood = 0; // good projections\n VecVector2d goodProjection;\n\n for (int iter = 0; iter < iterations; iter++) {\n nGood = 0;\n goodProjection.clear();\n\n // Define Hessian and bias\n Matrix6d H = Matrix6d::Zero(); // 6x6 Hessian\n Vector6d b = Vector6d::Zero(); // 6x1 bias\n\n for (size_t i = 0; i < px_ref.size(); i++) {\n\n // compute the projection in the second image\n // TODO START YOUR CODE HERE\n float u = px_ref[i](0), v = px_ref[i](1);\n\t\t\tdouble zCam1 = depth_ref[i];\n\t\t\tdouble xCam1 = double(zCam1 / fx * (u - cx));\n\t\t\tdouble yCam1 = double(zCam1 / fy * (v - cy));\n\t\t\tVector3d pCam1(xCam1, yCam1, zCam1);\n\t\t\tVector3d pCam2 = T21 * pCam1;\n\t\t\tdouble xCam2 = pCam2[0], yCam2 = pCam2[1], zCam2 = pCam2[2];\n\t\t\tfloat u2 = float(fx * xCam2 / zCam2 + cx);\n\t\t\tfloat v2 = float(fy * yCam2 / zCam2 + cy);\n\t\t\t\n\t\t\tif(OutOfImg(u-half_patch_size, v-half_patch_size, img1.cols, img1.rows)\n\t\t\t || OutOfImg(u+half_patch_size-1, v+half_patch_size-1, img1.cols, img1.rows) \n\t\t\t || OutOfImg(u2-half_patch_size, v2-half_patch_size, img2.cols, img2.rows)\n\t\t\t || OutOfImg(u2+half_patch_size-1, v2+half_patch_size-1, img2.cols, img2.rows)\n\t\t\t ){\n\t\t\t//\tcout << \"[DM] pixel out of img \" << endl;\n\t\t\t//\tcout << u << \" \" << v << \" \" << img1.cols << \" \" << img1.rows << endl;\n\t\t\t//\tcout << u2 << \" \" << v2 << \" \" << img2.cols << \" \" << img2.rows << endl;\n\t\t\t\tcontinue;\n\t\t\t}\n\n//\t\t\tcout << \"uv in cam1:\" << u << \" \" << v << endl;\n//\t\t\tcout << \"uv in cam2:\" << u2 << \" \" << v2 << endl;\n//\t\t\tcout << \"p in cam2: \" << pCam2 << endl; \n//\t\t\tcout << \"p in cam1: \" << pCam1 << endl; \n nGood++;\n goodProjection.push_back(Eigen::Vector2d(u2, v2));\n\n\t\t\tdouble 
xCam2_2 = xCam2 * xCam2, yCam2_2 = yCam2 * yCam2, zCam2_2 = zCam2 * zCam2;\n\t\t\tMatrix26d J_pixel_xi; // pixel to \\xi in Lie algebra\n\t\t\tJ_pixel_xi(0,0) = fx / zCam2;\n\t\t\tJ_pixel_xi(0,1) = 0;\n\t\t J_pixel_xi(0,2) = - fx * xCam2 / zCam2_2;\n\t\t J_pixel_xi(0,3) = - fx * xCam2 * yCam2 / zCam2_2;\n\t\t J_pixel_xi(0,4) = fx + fx * xCam2_2 / zCam2_2;\n\t\t J_pixel_xi(0,5) = - fx * yCam2 / zCam2;\n\t\t J_pixel_xi(1,0) = 0;\n\t\t J_pixel_xi(1,1) = fy / zCam2;\n\t\t J_pixel_xi(1,2) = -fy * yCam2 / zCam2_2;\n\t\t J_pixel_xi(1,3) = -fy - fy * yCam2_2 / zCam2_2;\n\t\t J_pixel_xi(1,4) = fy * xCam2 * yCam2 / zCam2_2;\n\t\t J_pixel_xi(1,5) = fy * xCam2 / zCam2;\n \n\t\t\t// and compute error and jacobian\n for (int x = -half_patch_size; x < half_patch_size; x++)\n for (int y = -half_patch_size; y < half_patch_size; y++) {\n\n double error = 0;\n Eigen::Vector2d J_img_pixel; // image gradients\n\n float u1_patch = u + x, v1_patch = v + y;\n\t\t\t\t\tfloat u2_patch = u2 + x, v2_patch = v2 + y;\n\t\t\t\t\terror = GetPixelValue(img1, u1_patch, v1_patch) - GetPixelValue(img2, u2_patch, v2_patch);\n\t\t \t\tJ_img_pixel[0] = (GetPixelValue(img2, u2_patch + 1, v2_patch) - GetPixelValue(img2, u2_patch - 1, v2_patch)) / 2;\n\t\t \t\t\tJ_img_pixel[1] = (GetPixelValue(img2, u2_patch, v2_patch + 1) - GetPixelValue(img2, u2_patch, v2_patch - 1)) / 2 ;\t\n\t\t\t\t\t// total jacobian\n Vector6d J = -J_pixel_xi.transpose() * J_img_pixel;\n\n H += J * J.transpose();\n b += -error * J;\n cost += error * error;\n }\n // END YOUR CODE HERE\n }\n\n // solve update and put it into estimation\n // TODO START YOUR CODE HERE\n Vector6d update;\n\t\tupdate = H.ldlt().solve(b);\n T21 = Sophus::SE3::exp(update) * T21;\n // END YOUR CODE HERE\n\n cost /= nGood;\n\n if (std::isnan(update[0])) {\n // sometimes occurred when we have a black or white patch and H is irreversible\n cout << \"update is nan\" << endl;\n break;\n }\n if (iter > 0 && cost > lastCost) {\n// cout << \"cost increased: \" << 
cost << \", \" << lastCost << endl;\n break;\n }\n lastCost = cost;\n// cout << \"cost = \" << cost << \", good = \" << nGood << endl;\n }\n// cout << \"good projection: \" << nGood << endl;\n// cout << \"T21 = \\n\" << T21.matrix() << endl;\n\n // in order to help you debug, we plot the projected pixels here\n// cv::Mat img1_show, img2_show;\n// cv::cvtColor(img1, img1_show, CV_GRAY2BGR);\n// cv::cvtColor(img2, img2_show, CV_GRAY2BGR);\n// for (auto &px: px_ref) {\n// cv::rectangle(img1_show, cv::Point2f(px[0] - 2, px[1] - 2), cv::Point2f(px[0] + 2, px[1] + 2),\n// cv::Scalar(0, 250, 0));\n// }\n// for (auto &px: goodProjection) {\n// cv::rectangle(img2_show, cv::Point2f(px[0] - 2, px[1] - 2), cv::Point2f(px[0] + 2, px[1] + 2),\n// cv::Scalar(0, 250, 0));\n// }\n// cv::imshow(\"reference\", img1_show);\n// cv::imshow(\"current\", img2_show);\n// cv::waitKey();\n}\n\nvoid DirectPoseEstimationMultiLayer(\n const cv::Mat &img1,\n const cv::Mat &img2,\n const VecVector2d &px_ref,\n const vector depth_ref,\n Sophus::SE3 &T21\n) {\n\n // parameters\n int pyramids = 4;\n double pyramid_scale = 0.5;\n double scales[] = {1.0, 0.5, 0.25, 0.125};\n\n // create pyramids\n vector pyr1, pyr2; // image pyramids\n // TODO START YOUR CODE HERE\n\tfor(int i = 0; i < pyramids; i++){\n\t\tMat img1_resize, img2_resize;\n\t\tcv::resize(img1, img1_resize, Size(img1.cols * scales[i], img1.rows * scales[i]));\n\t\tcv::resize(img2, img2_resize, Size(img2.cols * scales[i], img2.rows * scales[i]));\n\t\tpyr1.push_back(img1_resize);\n\t\tpyr2.push_back(img2_resize);\n\t}\n // END YOUR CODE HERE\n\n double fxG = fx, fyG = fy, cxG = cx, cyG = cy; // backup the old values\n for (int level = pyramids - 1; level >= 0; level--) {\n VecVector2d px_ref_pyr; // set the keypoints in this pyramid level\n for (auto &px: px_ref) {\n px_ref_pyr.push_back(scales[level] * px);\n }\n\n // TODO START YOUR CODE HERE\n // scale fx, fy, cx, cy in different pyramid levels\n\t\tfx = fxG * scales[level];\n\t\tfy = 
fyG * scales[level];\n\t\tcx = cxG * scales[level];\n\t\tcy = cyG * scales[level];\n\n // END YOUR CODE HERE\n DirectPoseEstimationSingleLayer(pyr1[level], pyr2[level], px_ref_pyr, depth_ref, T21);//T21 have add before's cal result.\n }\n\tcout << \"Multi T21 = \\n\" << T21.matrix() << endl;\n}\n", "meta": {"hexsha": "2629064ad49f46b9096979ca1e90109a45562467", "size": 10084, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "slam/PA6_code/code/direct_method.cpp", "max_stars_repo_name": "wallEVA96/algorithm", "max_stars_repo_head_hexsha": "c64e50eff9ad928015ce2780086dd9682c8e2220", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 19.0, "max_stars_repo_stars_event_min_datetime": "2018-12-27T05:44:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T03:36:15.000Z", "max_issues_repo_path": "slam/PA6_code/code/direct_method.cpp", "max_issues_repo_name": "wallEVA96/algorithm", "max_issues_repo_head_hexsha": "c64e50eff9ad928015ce2780086dd9682c8e2220", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slam/PA6_code/code/direct_method.cpp", "max_forks_repo_name": "wallEVA96/algorithm", "max_forks_repo_head_hexsha": "c64e50eff9ad928015ce2780086dd9682c8e2220", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2020-04-23T02:01:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-28T02:55:16.000Z", "avg_line_length": 34.1830508475, "max_line_length": 133, "alphanum_fraction": 0.5800277668, "num_tokens": 3225, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.822189134878876, "lm_q2_score": 0.6076631698328917, "lm_q1q2_score": 0.4996140559026607}} {"text": "//\n// AMGCLSolver.hpp\n// IPC\n//\n// Created by Minchen Li on 11/06/19.\n//\n\n#ifndef AMGCLSolver_hpp\n#define AMGCLSolver_hpp\n\n#include \"LinSysSolver.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\n#include \n#include \n\n#define USE_BW_BACKEND // use blockwise backend, must undef USE_BW_AGGREGATION\n// #define USE_BW_AGGREGATION // use scalar backend but blockwise aggregation, must undef USE_BW_BACKEND\n// if neither of above is defined, will use scalar backend and aggregation\n\n// #define USE_AMG_SOLVER // use a single V-cycle to approximately solve the linear system\n// if not defined then will use V-cycle preconditioned CG to solve the linear system\n\nnamespace IPC {\n\ntemplate \nclass AMGCLSolver : public LinSysSolver {\n typedef LinSysSolver Base;\n\n#ifdef USE_BW_BACKEND\n typedef amgcl::static_matrix value_type;\n typedef amgcl::static_matrix rhs_type;\n typedef amgcl::backend::builtin BBackend;\n using Solver = amgcl::make_solver<\n // Use AMG as preconditioner:\n amgcl::amg<\n BBackend,\n amgcl::coarsening::smoothed_aggregation,\n amgcl::relaxation::gauss_seidel>,\n // And BiCGStab as iterative solver:\n amgcl::solver::lgmres>;\n#else\n typedef amgcl::backend::builtin Backend;\n // Use AMG as preconditioner:\n typedef amgcl::make_solver<\n // Use AMG as preconditioner:\n amgcl::amg<\n Backend,\n amgcl::coarsening::smoothed_aggregation,\n amgcl::relaxation::gauss_seidel>,\n // And CG as iterative solver:\n amgcl::solver::lgmres>\n Solver;\n#endif\n\nprotected:\n Solver* solver;\n std::vector _ia, _ja;\n std::vector _a;\n\npublic:\n AMGCLSolver(void);\n ~AMGCLSolver(void);\n\n void set_pattern(const std::vector>& vNeighbor,\n const std::set& fixedVert);\n void load(const char* filePath, Eigen::VectorXd& rhs);\n\n void load_AMGCL(const char* 
filePath, Eigen::VectorXd& rhs);\n void write_AMGCL(const char* filePath, const Eigen::VectorXd& rhs) const;\n\n void copyOffDiag_IJ(void);\n void copyOffDiag_a(void);\n\n void analyze_pattern(void);\n\n bool factorize(void);\n\n void solve(Eigen::VectorXd& rhs,\n Eigen::VectorXd& result);\n};\n\n} // namespace IPC\n\n#endif /* AMGCLSolver_hpp */\n", "meta": {"hexsha": "6b9887d9fba450ce580d7ff2d9a9ccf08abd79ba", "size": 2852, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/LinSysSolver/AMGCLSolver.hpp", "max_stars_repo_name": "vincentkslim/IPC", "max_stars_repo_head_hexsha": "eb702ead6f23a1dc0be39c9f5a0fd62c80abeb98", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9.0, "max_stars_repo_stars_event_min_datetime": "2021-11-03T18:39:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T09:19:14.000Z", "max_issues_repo_path": "src/LinSysSolver/AMGCLSolver.hpp", "max_issues_repo_name": "vincentkslim/IPC", "max_issues_repo_head_hexsha": "eb702ead6f23a1dc0be39c9f5a0fd62c80abeb98", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-11-03T18:47:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T19:41:49.000Z", "max_forks_repo_path": "src/LinSysSolver/AMGCLSolver.hpp", "max_forks_repo_name": "vincentkslim/IPC", "max_forks_repo_head_hexsha": "eb702ead6f23a1dc0be39c9f5a0fd62c80abeb98", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2021-11-03T18:57:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T06:43:37.000Z", "avg_line_length": 29.1020408163, "max_line_length": 104, "alphanum_fraction": 0.7040673212, "num_tokens": 767, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8221891305219504, "lm_q2_score": 0.6076631698328916, "lm_q1q2_score": 0.49961405325511743}} {"text": "#include \n#include \n#include \n#include \n\nnamespace lm\n{\nbox2f get_box(Eigen::Vector2f extents) { return box2f{-extents, extents}; }\n\nfloat distance(Eigen::Vector2f point, Eigen::Vector2f box_extents)\n{\n box2f box = get_box(box_extents);\n\n if (boost::geometry::within(point, box))\n {\n Eigen::Vector2f a{-box_extents[0], -box_extents[1]},\n b{-box_extents[0], box_extents[1]}, c{box_extents[0], box_extents[1]},\n d{box_extents[0], -box_extents[1]};\n\n std::initializer_list segments{\n {a, b}, {b, c}, {c, d}, {d, a}};\n\n float distance{std::numeric_limits::max()};\n\n for (auto segment : segments)\n {\n distance = std::min(\n (float)boost::geometry::distance(segment, point), distance);\n }\n return -distance;\n }\n\n return boost::geometry::distance(point, box);\n}\n\nstd::pair\n closest_edge(Eigen::Vector2f point, Eigen::Vector2f box_extents)\n{\n Eigen::Vector2f a{-box_extents[0], -box_extents[1]},\n b{-box_extents[0], box_extents[1]}, c{box_extents[0], box_extents[1]},\n d{box_extents[0], -box_extents[1]};\n\n std::initializer_list segments{{a, b}, {b, c}, {c, d}, {d, a}};\n\n float min_distance{std::numeric_limits::max()};\n segment2f closest;\n\n for (auto segment : segments)\n {\n double segment_distance = boost::geometry::distance(segment, point);\n if (segment_distance < min_distance)\n {\n min_distance = segment_distance;\n closest = segment;\n }\n }\n\n return {closest, min_distance};\n}\n} // namespace lm\n", "meta": {"hexsha": "7b6a0aa7fb9ff341b2072940b4a0dee9d13f2d53", "size": 1709, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "lmlib/src/geometry/intersection.cpp", "max_stars_repo_name": "Lawrencemm/LM-Engine", "max_stars_repo_head_hexsha": "9c5e59e64e2a5a24c347538fa49046ab5a88d1f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 40.0, "max_stars_repo_stars_event_min_datetime": 
"2020-03-13T06:12:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T21:05:34.000Z", "max_issues_repo_path": "lmlib/src/geometry/intersection.cpp", "max_issues_repo_name": "Lawrencemm/LM-Engine", "max_issues_repo_head_hexsha": "9c5e59e64e2a5a24c347538fa49046ab5a88d1f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 31.0, "max_issues_repo_issues_event_min_datetime": "2020-02-09T06:25:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-31T04:37:08.000Z", "max_forks_repo_path": "lmlib/src/geometry/intersection.cpp", "max_forks_repo_name": "Lawrencemm/LM-Engine", "max_forks_repo_head_hexsha": "9c5e59e64e2a5a24c347538fa49046ab5a88d1f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2020-03-13T06:12:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-21T15:41:17.000Z", "avg_line_length": 28.0163934426, "max_line_length": 80, "alphanum_fraction": 0.6184903452, "num_tokens": 482, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8221891305219504, "lm_q2_score": 0.6076631698328916, "lm_q1q2_score": 0.49961405325511743}} {"text": "//////////////////////////////////////////////////////////////////////////////\n// distribution::toolkit::distributions::gamma::is_log_concave //\n// //\n// (C) Copyright 2009 Erwann Rogard //\n// Use, modification and distribution are subject to the //\n// Boost Software License, Version 1.0. 
(See accompanying file //\n// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) //\n//////////////////////////////////////////////////////////////////////////////\n#ifndef BOOST_STATISTICS_DETAIL_DISTRIBUTION_TOOLKIT_GAMMA_IS_LOG_CONCAVE_HPP_ER_2009\n#define BOOST_STATISTICS_DETAIL_DISTRIBUTION_TOOLKIT_GAMMA_IS_LOG_CONCAVE_HPP_ER_2009\n#include \n#include \n#include \n\nnamespace boost{\nnamespace math{\n\ntemplate \ninline bool is_log_concave(\n const boost::math::gamma_distribution& dist\n){\n return ( dist.shape() > static_cast(1) );\n}\n\n}// math\n}// boost\n\n#endif\n", "meta": {"hexsha": "01bab13ae9622b75d0e349f2213675342a8fc6c9", "size": 1137, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "distribution_toolkit/boost/statistics/detail/distribution_toolkit/distributions/gamma/is_log_concave.hpp", "max_stars_repo_name": "rogard/boost_sandbox_statistics", "max_stars_repo_head_hexsha": "16aacbc716a31a9f7bb6c535b1c90dc343282a23", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "distribution_toolkit/boost/statistics/detail/distribution_toolkit/distributions/gamma/is_log_concave.hpp", "max_issues_repo_name": "rogard/boost_sandbox_statistics", "max_issues_repo_head_hexsha": "16aacbc716a31a9f7bb6c535b1c90dc343282a23", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "distribution_toolkit/boost/statistics/detail/distribution_toolkit/distributions/gamma/is_log_concave.hpp", "max_forks_repo_name": "rogard/boost_sandbox_statistics", "max_forks_repo_head_hexsha": "16aacbc716a31a9f7bb6c535b1c90dc343282a23", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, 
"avg_line_length": 39.2068965517, "max_line_length": 85, "alphanum_fraction": 0.545294635, "num_tokens": 229, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8221891218080991, "lm_q2_score": 0.6076631698328916, "lm_q1q2_score": 0.49961404796003095}} {"text": "/*-------------Lanczos.cpp----------------------------------------------------//\n*\n* Purpose: To diagonalize a random matrix using the Lanczos algorithm\n*\n* Notes: Compile with (for Arch systems):\n* g++ -I /usr/include/eigen3/ Lanczos.cpp\n* 0's along the prime diagonal. I don't know what this means.\n*\n*-----------------------------------------------------------------------------*/\n \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Eigen;\n\n// Function for the lanczos algorithm, returns Tri-diagonal matrix\nMatrixXd lanczos(MatrixXd &d_matrix, int row_num);\n\n// Function for QR decomposition\nMatrixXd qrdecomp(MatrixXd &Tridiag);\n\n// Function to perform the Power Method\nvoid p_method(MatrixXd &Tridiag, MatrixXd &Q);\n\n// Function to return sign of value (signum function)\nint sign(double value);\n\n// Function to check eigenvectors and values\nvoid eigentest(MatrixXd &d_matrix, MatrixXd &Q);\n\n/*----------------------------------------------------------------------------//\n* MAIN\n*-----------------------------------------------------------------------------*/\n\nint main(){\n\n int size = 200;\n MatrixXd d_matrix(size,size);\n\n // set up random device\n static std::random_device rd;\n int seed = rd();\n static std::mt19937 gen(seed);\n std::uniform_real_distribution dist(0,1);\n\n for (size_t i = 0; i < d_matrix.rows(); ++i){\n for (size_t j = 0; j <= i; ++j){\n d_matrix(i,j) = dist(gen);\n d_matrix(j,i) = d_matrix(i,j);\n }\n }\n\n MatrixXd Tridiag = lanczos(d_matrix, 20);\n\n //std::cout << '\\n' << \"Tridiagonal matrix is: \\n\" << Tridiag << '\\n';\n\n /*\n // Testing for eigenvalue determination\n MatrixXd Tridiag(3,3);\n 
Tridiag << 1, 2, 0,\n 2, 1, 0,\n 0, 0, -3;\n\n MatrixXd Tridiag(5,5);\n Tridiag << 5, 0, 0, 0, 0,\n 0, 4, 0, 0, 0,\n 0, 0, 3, 0, 0,\n 0, 0, 0, 2, 0,\n 0, 0, 0, 0, 7;\n Tridiag = lanczos(Tridiag, 5);\n */\n\n //std::cout << \"Tridiag is: \" << '\\n' << Tridiag << '\\n';\n\n MatrixXd Q = qrdecomp(Tridiag);\n\n MatrixXd Qtemp = Q;\n\n //std::cout << \"Q is: \" << '\\n';\n //std::cout << Q << '\\n';\n\n std::cout << \"Finding eigenvalues: \" << '\\n';\n p_method(Tridiag, Q);\n\n //std::cout << \"Q is: \" << '\\n' << Q << '\\n';\n\n Qtemp = Qtemp - Q;\n //std::cout << \"After the Power Method: \" << Qtemp.squaredNorm() << '\\n';\n eigentest(Tridiag, Q);\n\n}\n\n/*----------------------------------------------------------------------------//\n* SUBROUTINE\n*-----------------------------------------------------------------------------*/\n\n// Function for the lanczos algorithm, returns Tri-diagonal matrix\nMatrixXd lanczos(MatrixXd &d_matrix, int row_num){\n\n // Creating random device\n static std::random_device rd;\n int seed = rd();\n static std::mt19937 gen(seed);\n std::uniform_real_distribution dist(0,1); \n\n // Defining values\n double threshold = 0.01;\n int j = 0;\n int size = d_matrix.rows();\n\n // Setting beta arbitrarily large for now \n double beta = 10;\n\n // generating the first rayleigh vector\n // alpha is actually just a double... sorry about that.\n MatrixXd rayleigh(d_matrix.rows(),1), q(d_matrix.rows(),1),\n alpha(1, 1);\n MatrixXd identity = MatrixXd::Identity(d_matrix.rows(), d_matrix.cols());\n\n // krylov is the krylovian subspace... Note, there might be a dynamic way to\n // do this. 
Something like:\n //std::vector krylov;\n MatrixXd krylov(d_matrix.rows(), row_num);\n\n for (size_t i = 0; i < size; ++i){\n rayleigh(i) = dist(gen);\n }\n\n //std::cout << rayleigh << '\\n';\n\n //while (beta > threshold){\n for (size_t i = 0; i < row_num; ++i){\n beta = rayleigh.norm();\n //std::cout << \"beta is: \\n\" << beta << '\\n';\n\n q = rayleigh / beta;\n //std::cout << \"q is: \\n\" << q << '\\n';\n\n alpha = q.transpose() * d_matrix * q;\n //std::cout << \"alpha is \\n\" << alpha << '\\n';\n\n if (j == 0){\n rayleigh = (d_matrix - alpha(0,0) * identity) * q;\n }\n else{\n rayleigh = (d_matrix - alpha(0,0) * identity) * q \n - beta * krylov.col(j - 1);\n\n }\n //std::cout << \"rayleigh is: \\n\" << rayleigh <<'\\n';\n //std::cout << \"i is: \" << i << '\\n';\n\n //krylov.push_back(q);\n krylov.col(j) = q;\n j = j+1;\n // std::cout << j << '\\n';\n }\n\n /*\n MatrixXd krylov_id = krylov.transpose() * krylov;\n std::cout << \"The identity matrix from the krylov subspace is: \\n\" \n << krylov_id << '\\n';\n */\n\n MatrixXd T(row_num,row_num);\n T = krylov.transpose() * d_matrix * krylov;\n\n return T;\n}\n\n// Function for QR decomposition\n// Because we only need Q for the power method, I will retun only Q\nMatrixXd qrdecomp(MatrixXd &Tridiag){\n // Q is and orthonormal vector => Q'Q = 1\n MatrixXd Id = MatrixXd::Identity(Tridiag.rows(), Tridiag.cols());\n MatrixXd Q = Id;\n MatrixXd P(Tridiag.rows(), Tridiag.cols());\n\n // R is the upper triangular matrix\n MatrixXd R = Tridiag;\n\n int row_num = Tridiag.rows();\n int countx = 0, county = 0;\n\n // Scale R \n double sum = 0.0, sigma, tau, fak, max_val = 0;\n\n bool sing;\n\n // Defining vectors for algorithm\n MatrixXd diag(row_num,1);\n\n //std::cout << R << '\\n';\n\n for (int i = 0; i < row_num-1; ++i){\n diag = MatrixXd::Zero(row_num, 1);\n\n sum = 0;\n for (size_t j = i; j < row_num; ++j){\n sum += R(j,i) * R(j,i);\n //std::cout << R(j,i) << '\\n';\n }\n sum = sqrt(sum);\n\n if (R(i,i) > 0){\n 
sigma = -sum;\n }\n else{\n sigma = sum;\n }\n\n //std::cout << \"sigma is: \" << sigma << '\\n';\n\n sum = 0;\n //diag = R.block(i,i, row_num - i, 1);\n //std::cout << \"diag is: \" << '\\n';\n for (int j = i; j < row_num; ++j){\n //std::cout << i << '\\t' << j << '\\n';\n if (j == i){\n diag(j) = R(j,i) + sigma;\n }\n else{\n diag(j) = R(j, i);\n }\n //std::cout << diag(j) << '\\n';\n sum = sum + diag(j) * diag(j);\n }\n sum = sqrt(sum);\n\n //std::cout << \"sum is: \" << sum << '\\n';\n\n if (sum > 0.000000000000001){\n\n for (int j = i; j < row_num; ++j){\n diag(j) = diag(j) / sum;\n }\n \n //std::cout << \"normalized diag is: \" << '\\n' << diag << '\\n';\n \n P = Id - (diag * diag.transpose()) * 2.0;\n \n R = P * R;\n Q = Q * P;\n }\n\n //std::cout << \"R is: \" << R << '\\n';\n\n }\n //std::cout << \"R is: \" << R << '\\n';\n //std::cout << \"Q is: \" << Q << '\\n';\n\n //std::cout << \"QR is: \" << '\\n' << Q*R << '\\n';\n\n //std::cout << \"Q^T * Q is: \" << '\\n' << Q.transpose() * Q << '\\n' << '\\n';\n //std::cout << \"QR - A is: \" << '\\n' << Q*R - Tridiag << '\\n';\n //std::cout << \"Q^T * A - R: \" << '\\n'\n // << Q.transpose() * Tridiag - R << '\\n' << '\\n';\n\n return Q;\n}\n\n// Function to perform the Power Method\nvoid p_method(MatrixXd &Tridiag, MatrixXd &Q){\n\n //std::cout << \"Q is: \" << '\\n' << Q << '\\n';\n //std::cout << \"Tridiag is : \" << '\\n' << Tridiag << '\\n';\n\n // Find all eigenvectors\n MatrixXd eigenvectors(Tridiag.rows(), Tridiag.cols());\n MatrixXd Z(Tridiag.rows(), Tridiag.cols());\n MatrixXd Qtemp = Q;\n\n // Iteratively defines eigenvectors\n for (int i = 0; i < Tridiag.rows(); ++i){\n Z = Tridiag * Q;\n Q = qrdecomp(Z);\n\n }\n\n Qtemp = Qtemp - Q;\n //std::cout << \"This should not be 0: \" << Qtemp.squaredNorm() << '\\n';\n\n}\n\n// Function to return sign of value (signum function)\nint sign(double value){\n if (value < 0.0){\n return -1;\n }\n else if (value > 0){\n return 1;\n }\n else {\n return 0;\n 
}\n}\n\n// Function to check eigenvectors and values\nvoid eigentest(MatrixXd &Tridiag, MatrixXd &Q){\n\n // Calculating the Rayleigh quotient (v^t * A * v) / (v^t * v)\n // Note, that this should be a representation of eigenvalues\n\n std::vector eigenvalues(Tridiag.rows());\n MatrixXd checkvector(Tridiag.rows(),1);\n double QQ, QAQ;\n\n for (size_t i = 0; i < Tridiag.rows(); ++i){\n QQ = Q.col(i).transpose() * Q.col(i); \n QAQ = Q.col(i).transpose() * Tridiag * Q.col(i);\n eigenvalues[i] = QAQ / QQ;\n std::cout << \"eigenvalue is: \" << eigenvalues[i] << '\\n';\n\n checkvector = ((Tridiag * Q.col(i)) / eigenvalues[i]) - Q.col(i);\n //std::cout << checkvector << '\\n' << '\\n';\n std::cout << \"This should be 0: \" << '\\t' \n << checkvector.squaredNorm() << '\\n';\n \n }\n\n}\n\n", "meta": {"hexsha": "f7ea3788ba2bf5fb8d1158e4d3f8dcddf6f7d35e", "size": 8946, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "DMRG/Lanczos.cpp", "max_stars_repo_name": "mika314/simuleios", "max_stars_repo_head_hexsha": "0b05660c7df0cd6e31eb5e70864cbedaec29b55a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 197.0, "max_stars_repo_stars_event_min_datetime": "2015-07-26T02:04:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T11:53:33.000Z", "max_issues_repo_path": "DMRG/Lanczos.cpp", "max_issues_repo_name": "shiffman/simuleios", "max_issues_repo_head_hexsha": "57239350d2cbed10893483bda65fa323e5e3a06d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18.0, "max_issues_repo_issues_event_min_datetime": "2015-08-04T22:55:46.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-06T02:33:48.000Z", "max_forks_repo_path": "DMRG/Lanczos.cpp", "max_forks_repo_name": "shiffman/simuleios", "max_forks_repo_head_hexsha": "57239350d2cbed10893483bda65fa323e5e3a06d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 55.0, "max_forks_repo_forks_event_min_datetime": "2015-08-02T21:43:18.000Z", "max_forks_repo_forks_event_max_datetime": 
"2021-12-13T18:25:08.000Z", "avg_line_length": 27.3577981651, "max_line_length": 80, "alphanum_fraction": 0.4765258216, "num_tokens": 2644, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.822189121808099, "lm_q2_score": 0.6076631698328917, "lm_q1q2_score": 0.49961404796003095}} {"text": " \n#ifndef SOLVERSPOINTS_HPP\n#define SOLVERSPOINTS_HPP\n\n#include \n#include \n\n// ##########################################################\n// Solver for 6L. The solver is from Stewenius paper,\n// and was implemented by Kneip.\n// ##########################################################\ntemplate \nstd::vector> solver3Q(\n std::vector, Eigen::Matrix>> ptPair,\n\tstd::vector, Eigen::Matrix>> plPair,\n\tstd::vector, Eigen::Matrix>,\n std::pair, Eigen::Matrix>>> lPair\n);\n\n#endif", "meta": {"hexsha": "b91e2e7cf8157655ceaf949562942ac113ca1d03", "size": 767, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/solverPoints.hpp", "max_stars_repo_name": "3DVisionISR/3DMinRegLineIntersect", "max_stars_repo_head_hexsha": "6ddfb39d34725f6bbd82ce7b6ca9be61fedd0438", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2020-06-11T15:50:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-23T10:36:39.000Z", "max_issues_repo_path": "include/solverPoints.hpp", "max_issues_repo_name": "3DVisionISR/3DMinRegLineIntersect", "max_issues_repo_head_hexsha": "6ddfb39d34725f6bbd82ce7b6ca9be61fedd0438", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/solverPoints.hpp", "max_forks_repo_name": "3DVisionISR/3DMinRegLineIntersect", "max_forks_repo_head_hexsha": "6ddfb39d34725f6bbd82ce7b6ca9be61fedd0438", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2020-06-17T16:25:12.000Z", 
"max_forks_repo_forks_event_max_datetime": "2021-09-22T02:23:18.000Z", "avg_line_length": 38.35, "max_line_length": 94, "alphanum_fraction": 0.6036505867, "num_tokens": 201, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.822189121808099, "lm_q2_score": 0.6076631698328916, "lm_q1q2_score": 0.49961404796003084}} {"text": "#include \"nmea/vtg.h\"\n#include \n#include \n#include \"geometry_msgs/Vector3Stamped.h\"\n#include \"angles/angles.h\"\n\nusing std::string;\nusing std::vector;\nusing std::tuple;\nusing std::get;\n\nnmea::vtg\nv3s_to_vtg_ros_msg(geometry_msgs::Vector3Stamped const &message);\n\nnmea::vtg\nv3s_to_vtg_ros_msg(geometry_msgs::Vector3Stamped const &message)\n{\n double const speedMagMps = sqrt(message.vector.x * message.vector.x +\n message.vector.y * message.vector.y +\n message.vector.z * message.vector.z);\n\n double const trackFromEastRad = atan2(-message.vector.y, message.vector.x);\n double const trackFromNorthDeg = angles::to_degrees(trackFromEastRad);\n\n nmea::vtg ros_msg;\n ros_msg.true_track_made_good = trackFromNorthDeg;\n ros_msg.magnetic_track_made_good_valid = false;\n ros_msg.ground_speed_knots = speedMagMps * 1.94384;\n ros_msg.ground_speed_kph = speedMagMps * 3.6;\n return ros_msg;\n}\n\nclass NmeaSubVelToNmeaPubVtg\n{\npublic:\n inline NmeaSubVelToNmeaPubVtg(ros::NodeHandle *const nh)\n : vtgPub(nh->advertise(\"vtg\", 10))\n {\n }\n\n inline void Callback(geometry_msgs::Vector3Stamped const &message)\n {\n this->vtgPub.publish(v3s_to_vtg_ros_msg(message));\n }\n\nprivate:\n ros::Publisher vtgPub;\n};\n\nint main(int argc, char *argv[])\n{\n ros::init(argc, argv, \"vel_to_vtg\");\n\n ros::NodeHandle n;\n NmeaSubVelToNmeaPubVtg nmea_sub_vel_to_nmea_pub_vtg(&n);\n ros::Subscriber sub =\n n.subscribe(\"gps/fix_velocity\", 10, &NmeaSubVelToNmeaPubVtg::Callback,\n &nmea_sub_vel_to_nmea_pub_vtg);\n\n ros::spin();\n\n return EXIT_SUCCESS;\n}\n", "meta": {"hexsha": "443f63c97fe6ed452f3fbdd73378f34523a252c3", 
"size": 1671, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/geometry_msgs_Vector3Stamped_to_vtg.cpp", "max_stars_repo_name": "geoffviola/raw_nmea", "max_stars_repo_head_hexsha": "37282e1c07a067d8f7370564c5bb94af0c34af90", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/geometry_msgs_Vector3Stamped_to_vtg.cpp", "max_issues_repo_name": "geoffviola/raw_nmea", "max_issues_repo_head_hexsha": "37282e1c07a067d8f7370564c5bb94af0c34af90", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/geometry_msgs_Vector3Stamped_to_vtg.cpp", "max_forks_repo_name": "geoffviola/raw_nmea", "max_forks_repo_head_hexsha": "37282e1c07a067d8f7370564c5bb94af0c34af90", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.109375, "max_line_length": 77, "alphanum_fraction": 0.710353082, "num_tokens": 498, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8221891130942474, "lm_q2_score": 0.6076631698328916, "lm_q1q2_score": 0.49961404266494414}} {"text": "/*----------------------------------------------------------------------------*/\n/* Copyright (c) 2020-2021 FIRST. All Rights Reserved. */\n/* Open Source Software - may be modified and shared by FRC teams. The code */\n/* must be accompanied by the FIRST BSD license file in the root directory of */\n/* the project. 
*/\n/*----------------------------------------------------------------------------*/\n\n#include \n#include \n\n#include \"frc/geometry/Pose2d.h\"\n#include \"frc/system/LinearSystem.h\"\n#include \"frc/system/plant/LinearSystemId.h\"\n#include \"frc/trajectory/TestTrajectory.h\"\n#include \"frc/trajectory/TrajectoryGenerator.h\"\n#include \"frc/trajectory/constraint/DifferentialDriveVelocitySystemConstraint.h\"\n#include \"gtest/gtest.h\"\n\n// TODO: The constraint used in this test violates the max voltage, but not\n// doing so would mean the only way to reach the max steady-state velocity for\n// that voltage is open-loop exponential convergence. The constraint's\n// optimization intent should be clarified so we can write a better test.\nTEST(DifferentialDriveVelocitySystemTest, DISABLED_Constraint) {\n constexpr auto kMaxVoltage = 10_V;\n\n // Pick an unreasonably large kA to ensure the constraint has to do some work\n const frc::LinearSystem<2, 2, 2> system =\n frc::LinearSystemId::IdentifyDrivetrainSystem(\n 1_V / 1_mps, 3_V / 1_mps_sq, 1_V / 1_rad_per_s, 3_V / 1_rad_per_s_sq);\n const frc::DifferentialDriveKinematics kinematics{0.5_m};\n auto config = frc::TrajectoryConfig(12_mps, 12_mps_sq);\n config.AddConstraint(frc::DifferentialDriveVelocitySystemConstraint(\n system, kinematics, kMaxVoltage));\n\n auto trajectory = frc::TestTrajectory::GetTrajectory(config);\n\n auto time = 0_s;\n auto dt = 20_ms;\n auto duration = trajectory.TotalTime();\n\n while (time < duration) {\n auto point = trajectory.Sample(time);\n time += dt;\n\n const frc::ChassisSpeeds chassisSpeeds{point.velocity, 0_mps,\n point.velocity * point.curvature};\n\n auto [left, right] = kinematics.ToWheelSpeeds(chassisSpeeds);\n\n auto x = frc::MakeMatrix<2, 1>(left.to(), right.to());\n\n // Not really a strictly-correct test as we're using the chassis accel\n // instead of the wheel accel, but much easier than doing it \"properly\" and\n // a reasonable check anyway\n auto xDot = 
frc::MakeMatrix<2, 1>(point.acceleration.to(),\n point.acceleration.to());\n\n Eigen::Matrix u =\n system.B().householderQr().solve(xDot - system.A() * x);\n\n EXPECT_GE(u(0), -kMaxVoltage.to() - 0.5);\n EXPECT_LE(u(0), kMaxVoltage.to() + 0.5);\n EXPECT_GE(u(1), -kMaxVoltage.to() - 0.5);\n EXPECT_LE(u(1), kMaxVoltage.to() + 0.5);\n }\n}\n\nTEST(DifferentialDriveVelocitySystemTest, HighCurvature) {\n constexpr auto kMaxVoltage = 10_V;\n\n const frc::LinearSystem<2, 2, 2> system =\n frc::LinearSystemId::IdentifyDrivetrainSystem(\n 1_V / 1_mps, 3_V / 1_mps_sq, 1_V / 1_rad_per_s, 3_V / 1_rad_per_s_sq);\n // Large trackwidth - need to test with radius of curvature less than half of\n // trackwidth\n const frc::DifferentialDriveKinematics kinematics{3_m};\n\n auto config = frc::TrajectoryConfig(12_fps, 12_fps_sq);\n config.AddConstraint(frc::DifferentialDriveVelocitySystemConstraint(\n system, kinematics, kMaxVoltage));\n\n EXPECT_NO_FATAL_FAILURE(frc::TrajectoryGenerator::GenerateTrajectory(\n frc::Pose2d{1_m, 0_m, frc::Rotation2d{90_deg}}, {},\n frc::Pose2d{0_m, 1_m, frc::Rotation2d{180_deg}}, config));\n\n config.SetReversed(true);\n\n EXPECT_NO_FATAL_FAILURE(frc::TrajectoryGenerator::GenerateTrajectory(\n frc::Pose2d{0_m, 1_m, frc::Rotation2d{180_deg}}, {},\n frc::Pose2d{1_m, 0_m, frc::Rotation2d{90_deg}}, config));\n}\n", "meta": {"hexsha": "4ddd5b1065eaf405f9fa8e750b65da24b11ae5e5", "size": 3940, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/test/cpp/frc/trajectory/constraint/DifferentialDriveVelocitySystemTest.cpp", "max_stars_repo_name": "frc3512/Robot-2019", "max_stars_repo_head_hexsha": "376a94f138562f8af59215f5e21a41a68b3f5cd2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2019-07-05T01:06:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-17T15:18:49.000Z", "max_issues_repo_path": "src/test/cpp/frc/trajectory/constraint/DifferentialDriveVelocitySystemTest.cpp", 
"max_issues_repo_name": "frc3512/Robot-2019", "max_issues_repo_head_hexsha": "376a94f138562f8af59215f5e21a41a68b3f5cd2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/test/cpp/frc/trajectory/constraint/DifferentialDriveVelocitySystemTest.cpp", "max_forks_repo_name": "frc3512/Robot-2019", "max_forks_repo_head_hexsha": "376a94f138562f8af59215f5e21a41a68b3f5cd2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-02-14T16:21:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-14T16:21:42.000Z", "avg_line_length": 42.8260869565, "max_line_length": 80, "alphanum_fraction": 0.6626903553, "num_tokens": 1046, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8947894520743981, "lm_q2_score": 0.5583269943353744, "lm_q1q2_score": 0.4995851053396952}} {"text": "/**\n * Copyright (C) Omar Thor - All Rights Reserved\n * Unauthorized copying of this file, via any medium is strictly prohibited\n * Proprietary and confidential\n *\n * Written by Omar Thor , 2017\n */\n\n#ifndef SP_ALGO_NN_LAYER_CONV_HPP\n#define SP_ALGO_NN_LAYER_CONV_HPP\n\n#include \n\n#include \"layer.hpp\"\n#include \"connectivity.hpp\"\n#include \"detail/layers.hpp\"\n#include \"sp/util/types.hpp\"\n#include \"params.hpp\"\n\n\nSP_ALGO_NN_NAMESPACE_BEGIN\n\n/**\n * \\brief Convolutional Layer implementation\n *\n * Convolves input by a set of feature kernels and returns the convolution\n * results\n *\n * Note: This convolution implementation does not rotate the kernel\n * and is therefore not convolution but cross correlation, mathematically\n * speaking.\n *\n * Summary of parameters (Soft and Hard, i.e. 
parameterized and derived)\n * - N - Number of images in a mini-batch\n * - C - The number of input feature maps\n * - H - Height of input image\n * - W - Width of input image\n * - K - Number of output feature maps (number of kernels)\n * - R - Height of filter kernel\n * - S - Width of filter kernel\n * - U - Vertical Stride\n * - V - Horizontal Stride\n * - pad_h - Height of zero-padding\n * - pad_w - Width of zero-padding\n *\n * Output O of is a four dimensional tensor in R^NKPQ,\n * where ```P = f(H, R, u, pad_h) and Q = f(W, S, v, pad_w)```\n * where\n * * ```f(H, R, u, pad_h) = ceil((H - R + 1 + 2 * pad_h) / 2)```\n * * ```f(W, S, v, pad_w) = ceil((W - S + 1 + 2 * pad_w) / 2)```\n *\n * Convolution Modes (common LA terms, matlab, octave)\n * - valid - ```pad_w = pad_h = 0```\n * - same - ```pad_h = R/2 and pad_w = S / 2```\n * - full - ```pad_h = R - 1, pad_w = S - 1```\n *\n * \\tparam InputDim, includes:\n * - Width\n * - Height\n * - Depth (or channels)\n * \\tparam KernelDim, includes:... tbd\n * \\tparam Biased (optional) default true. Whether or not the layer contains bias.\n * \\tparam Sparsity The sparsity object. 
See #group_sparsity and #no_sparsity\n */\ntemplate<\n typename InputVolume,\n typename KernelParams = kernel_params_default,\n bool Biased = true,\n typename Connectivity = full_connectivity,\n size_t Dilation = 0\n>\nstruct conv_layer : layer<\n InputVolume,\n detail::convolution_kernel_out_dims_t,\n conv_layer\n> {\n\n /**\n * Base type\n */\n using base = layer<\n InputVolume,\n detail::convolution_kernel_out_dims_t,\n conv_layer\n >;\n\n /**\n * Validates KernelParams type\n */\n static_assert(util::is_instantiation_of_v, \"KernelParams template parameter must be an instance of kernel_params\");\n\n /**\n * Kernel parameters\n */\n using kernel_params = KernelParams;\n\n /**\n * Connectivity type (full, ngroups, etc)\n */\n using connectivity_type = Connectivity;\n\n /**\n * \\brief Dilation parameter\n * \\todo TBD: https://arxiv.org/abs/1511.07122\n */\n static_assert(Dilation == 0, \"Dilation is not implemented\");\n\n /**\n * Whether or not this layer has biased\n */\n constexpr static bool biased = Biased;\n\n /**\n * \\brief Input Dimensions\n */\n using input_dims = typename base::input_dims;\n\n /**\n * \\brief Output Dimensions\n */\n using output_dims = typename base::output_dims;\n\n /**\n * \\brief Weight dimensions\n */\n using weights_dims = weight_dims<\n output_dims::d,\n input_dims::d,\n kernel_params::h,\n kernel_params::w\n >;\n\n void forward_prop_impl(tensor_4& input, tensor_4& output) {\n\n /**\n * Number of samples in the input\n */\n const size_t samples = input.dimension(0);\n\n /**\n * Perform forward propagation for every sample\n */\n #pragma omp parallel for simd\n for (size_t si = 0; si < samples; ++si) {\n /**\n * Loop over the pairs of (D_out, D_in), i.e. input_dims::d*output_dims::d\n */\n for (size_t od = 0; od < output_dims::d; ++od) {\n for (size_t id = 0; id < input_dims::d; ++id) {\n if(connections(od, id)) {\n /*\n * If the output channel is connected to the input channel,\n * then perform convolution. 
This is done to support limited\n * connectivity when required\n *\n * Then, perform the convolution op\n *\n * \\todo add dilation\n * \\todo Optimize for smaller kernels (common, 2x2, 3x3, etc)\n */\n for (size_t oy = 0, iny = 0; oy < output_dims::h; ++oy, iny += kernel_params::s_h) {\n for (size_t ox = 0, inx = 0; ox < output_dims::w; ++ox, inx += kernel_params::s_w) {\n float_t sum = 0;\n for (size_t ky = 0; ky < kernel_params::h; ++ky) {\n for (size_t kx = 0; kx < kernel_params::w; ++kx) {\n auto& in_val = input(si, id, iny+ky, inx+kx);\n auto& w_val = w(od, id, ky, kx);\n sum += in_val * w_val;\n }\n }\n output(si, od, oy, ox) += sum;\n }\n }\n }\n }\n if constexpr(biased) {\n /**\n * Add bias to every output vector the depth slice at output(od)\n */\n output.chip(si, 0).chip(od, 0) = output.chip(si, 0).chip(od, 0) + b(od);\n }\n }\n }\n }\n\n /**\n * \\brief Back propagation implementation\n *\n * \\todo Optimize\n */\n void backward_prop_impl( tensor_4& prev_out,\n tensor_4& prev_delta,\n tensor_4& curr_out,\n tensor_4& curr_delta) {\n\n\n /**\n * Number of samples in the previous output\n */\n const size_t samples = prev_out.dimension(0);\n\n /**\n * Perform back propagation\n *\n * For every sample in the input\n */\n #pragma omp parallel for simd\n for(size_t si = 0; si < samples; ++si) {\n /**\n * For every (input depth, output depth) pair that is connected\n */\n for (size_t id = 0; id < input_dims::d; ++id) {\n for (size_t od = 0; od < output_dims::d; ++od) {\n if(connections(od, id)) {\n /* Propagate the current delta to the previous delta through the kernel */\n for (size_t oy = 0, iny = 0; oy < output_dims::h; ++oy, iny += kernel_params::s_h) {\n for (size_t ox = 0, inx = 0; ox < output_dims::w; ++ox, inx += kernel_params::s_w) {\n float_t& grad = curr_delta(si, od, oy, ox);\n for (size_t wy = 0; wy < weights_dims::h; ++wy) {\n for (size_t wx = 0; wx < weights_dims::w; ++wx) {\n auto& w_val = w(od, id, wy, wx);\n prev_delta(si, id, iny + wy, inx + 
wx) += w_val * grad;\n }\n }\n }\n }\n }\n }\n }\n for (size_t id = 0; id < input_dims::d; ++id) {\n for (size_t od = 0; od < output_dims::d; ++od) {\n if(connections(od, id)) {\n for (size_t wy = 0; wy < weights_dims::h; ++wy) {\n for (size_t wx = 0; wx < weights_dims::w; ++wx) {\n float_t delta = 0;\n for (size_t oy = 0, iny = 0; oy < output_dims::h; ++oy, iny += kernel_params::s_h) {\n for (size_t ox = 0, inx = 0; ox < output_dims::w; ++ox, inx += kernel_params::s_w) {\n auto& po = prev_out(si, id, oy + wy, ox + wx);\n auto& cd = curr_delta(si, od, oy, ox);\n delta += po * cd;\n }\n }\n dw(si, od, id, wy, wx) += delta;\n }\n }\n }\n }\n }\n if (biased) {\n for (size_t od = 0; od < output_dims::d; ++od) {\n tensor_0 sum = curr_delta.chip(si, 0).chip(od, 0).sum();\n db(si, od) += sum(0);\n }\n }\n }\n }\n\n /**\n * \\brief Weights of the layer.\n */\n weights_type w;\n\n /**\n * Weights Delta\n */\n weights_delta_type dw;\n\n /**\n * \\brief Bias.\n */\n bias_type b;\n\n /**\n * Bias Delta.\n */\n bias_delta_type db;\n\n connectivity_type connections;\n\n};\n\n\nSP_ALGO_GEN_NAMESPACE_END\n\n#endif\t/* SP_ALGO_NN_LAYER_CONV_HPP */\n\n", "meta": {"hexsha": "59cf223ad9bd2ec3a4ce550068734759607b0686", "size": 9612, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/sp/algo/nn/layer/convolution.hpp", "max_stars_repo_name": "thorigin/sp", "max_stars_repo_head_hexsha": "a837b4fcb5b7184591585082012942bbdb8f11f9", "max_stars_repo_licenses": ["FSFAP"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/sp/algo/nn/layer/convolution.hpp", "max_issues_repo_name": "thorigin/sp", "max_issues_repo_head_hexsha": "a837b4fcb5b7184591585082012942bbdb8f11f9", "max_issues_repo_licenses": ["FSFAP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": 
"include/sp/algo/nn/layer/convolution.hpp", "max_forks_repo_name": "thorigin/sp", "max_forks_repo_head_hexsha": "a837b4fcb5b7184591585082012942bbdb8f11f9", "max_forks_repo_licenses": ["FSFAP"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4912891986, "max_line_length": 148, "alphanum_fraction": 0.4783603829, "num_tokens": 2299, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8376199511728004, "lm_q2_score": 0.5964331462646255, "lm_q1q2_score": 0.4995843028520153}} {"text": "// (C) Copyright Nick Thompson 2019.\n// Use, modification and distribution are subject to the\n// Boost Software License, Version 1.0. (See accompanying file\n// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#ifndef BOOST_MATH_DIFFERENTIATION_LANCZOS_SMOOTHING_HPP\n#define BOOST_MATH_DIFFERENTIATION_LANCZOS_SMOOTHING_HPP\n#include // for std::abs\n#include \n#include // to nan initialize\n#include \n#include \n#include \n#include \n#include \n\nnamespace boost::math::differentiation {\n\nnamespace detail {\ntemplate \nclass discrete_legendre {\n public:\n explicit discrete_legendre(std::size_t n, Real x) : m_n{n}, m_r{2}, m_x{x},\n m_qrm2{1}, m_qrm1{x},\n m_qrm2p{0}, m_qrm1p{1},\n m_qrm2pp{0}, m_qrm1pp{0}\n {\n using std::abs;\n BOOST_MATH_ASSERT_MSG(abs(m_x) <= 1, \"Three term recurrence is stable only for |x| <=1.\");\n // The integer n indexes a family of discrete Legendre polynomials indexed by k <= 2*n\n }\n\n Real norm_sq(int r) const\n {\n Real prod = Real(2) / Real(2 * r + 1);\n for (int k = -r; k <= r; ++k) {\n prod *= Real(2 * m_n + 1 + k) / Real(2 * m_n);\n }\n return prod;\n }\n\n Real next()\n {\n Real N = 2 * m_n + 1;\n Real num = (m_r - 1) * (N * N - (m_r - 1) * (m_r - 1)) * m_qrm2;\n Real tmp = (2 * m_r - 1) * m_x * m_qrm1 - num / Real(4 * m_n * m_n);\n m_qrm2 = m_qrm1;\n m_qrm1 = tmp / m_r;\n ++m_r;\n return m_qrm1;\n }\n\n Real 
next_prime()\n {\n Real N = 2 * m_n + 1;\n Real s = (m_r - 1) * (N * N - (m_r - 1) * (m_r - 1)) / Real(4 * m_n * m_n);\n Real tmp1 = ((2 * m_r - 1) * m_x * m_qrm1 - s * m_qrm2) / m_r;\n Real tmp2 = ((2 * m_r - 1) * (m_qrm1 + m_x * m_qrm1p) - s * m_qrm2p) / m_r;\n m_qrm2 = m_qrm1;\n m_qrm1 = tmp1;\n m_qrm2p = m_qrm1p;\n m_qrm1p = tmp2;\n ++m_r;\n return m_qrm1p;\n }\n\n Real next_dbl_prime()\n {\n Real N = 2*m_n + 1;\n Real trm1 = 2*m_r - 1;\n Real s = (m_r - 1) * (N * N - (m_r - 1) * (m_r - 1)) / Real(4 * m_n * m_n);\n Real rqrpp = 2*trm1*m_qrm1p + trm1*m_x*m_qrm1pp - s*m_qrm2pp;\n Real tmp1 = ((2 * m_r - 1) * m_x * m_qrm1 - s * m_qrm2) / m_r;\n Real tmp2 = ((2 * m_r - 1) * (m_qrm1 + m_x * m_qrm1p) - s * m_qrm2p) / m_r;\n m_qrm2 = m_qrm1;\n m_qrm1 = tmp1;\n m_qrm2p = m_qrm1p;\n m_qrm1p = tmp2;\n m_qrm2pp = m_qrm1pp;\n m_qrm1pp = rqrpp/m_r;\n ++m_r;\n return m_qrm1pp;\n }\n\n Real operator()(Real x, std::size_t k)\n {\n BOOST_MATH_ASSERT_MSG(k <= 2 * m_n, \"r <= 2n is required.\");\n if (k == 0)\n {\n return 1;\n }\n if (k == 1)\n {\n return x;\n }\n Real qrm2 = 1;\n Real qrm1 = x;\n Real N = 2 * m_n + 1;\n for (std::size_t r = 2; r <= k; ++r) {\n Real num = (r - 1) * (N * N - (r - 1) * (r - 1)) * qrm2;\n Real tmp = (2 * r - 1) * x * qrm1 - num / Real(4 * m_n * m_n);\n qrm2 = qrm1;\n qrm1 = tmp / r;\n }\n return qrm1;\n }\n\n Real prime(Real x, std::size_t k) {\n BOOST_MATH_ASSERT_MSG(k <= 2 * m_n, \"r <= 2n is required.\");\n if (k == 0) {\n return 0;\n }\n if (k == 1) {\n return 1;\n }\n Real qrm2 = 1;\n Real qrm1 = x;\n Real qrm2p = 0;\n Real qrm1p = 1;\n Real N = 2 * m_n + 1;\n for (std::size_t r = 2; r <= k; ++r) {\n Real s =\n (r - 1) * (N * N - (r - 1) * (r - 1)) / Real(4 * m_n * m_n);\n Real tmp1 = ((2 * r - 1) * x * qrm1 - s * qrm2) / r;\n Real tmp2 = ((2 * r - 1) * (qrm1 + x * qrm1p) - s * qrm2p) / r;\n qrm2 = qrm1;\n qrm1 = tmp1;\n qrm2p = qrm1p;\n qrm1p = tmp2;\n }\n return qrm1p;\n }\n\n private:\n std::size_t m_n;\n std::size_t m_r;\n Real m_x;\n Real 
m_qrm2;\n Real m_qrm1;\n Real m_qrm2p;\n Real m_qrm1p;\n Real m_qrm2pp;\n Real m_qrm1pp;\n};\n\ntemplate \nstd::vector interior_velocity_filter(std::size_t n, std::size_t p) {\n auto dlp = discrete_legendre(n, 0);\n std::vector coeffs(p+1);\n coeffs[1] = 1/dlp.norm_sq(1);\n for (std::size_t l = 3; l < p + 1; l += 2)\n {\n dlp.next_prime();\n coeffs[l] = dlp.next_prime()/ dlp.norm_sq(l);\n }\n\n // We could make the filter length n, as f[0] = 0,\n // but that'd make the indexing awkward when applying the filter.\n std::vector f(n + 1);\n // This value should never be read, but this is the correct value *if it is read*.\n // Hmm, should it be a nan then? I'm not gonna agonize.\n f[0] = 0;\n for (std::size_t j = 1; j < f.size(); ++j)\n {\n Real arg = Real(j) / Real(n);\n dlp = discrete_legendre(n, arg);\n f[j] = coeffs[1]*arg;\n for (std::size_t l = 3; l <= p; l += 2)\n {\n dlp.next();\n f[j] += coeffs[l]*dlp.next();\n }\n f[j] /= (n * n);\n }\n return f;\n}\n\ntemplate \nstd::vector boundary_velocity_filter(std::size_t n, std::size_t p, int64_t s)\n{\n std::vector coeffs(p+1, std::numeric_limits::quiet_NaN());\n Real sn = Real(s) / Real(n);\n auto dlp = discrete_legendre(n, sn);\n coeffs[0] = 0;\n coeffs[1] = 1/dlp.norm_sq(1);\n for (std::size_t l = 2; l < p + 1; ++l)\n {\n // Calculation of the norms is common to all filters,\n // so it seems like an obvious optimization target.\n // I tried this: The spent in computing the norms time is not negligible,\n // but still a small fraction of the total compute time.\n // Hence I'm not refactoring out these norm calculations.\n coeffs[l] = dlp.next_prime()/ dlp.norm_sq(l);\n }\n\n std::vector f(2*n + 1);\n for (std::size_t k = 0; k < f.size(); ++k)\n {\n Real j = Real(k) - Real(n);\n Real arg = j/Real(n);\n dlp = discrete_legendre(n, arg);\n f[k] = coeffs[1]*arg;\n for (std::size_t l = 2; l <= p; ++l)\n {\n f[k] += coeffs[l]*dlp.next();\n }\n f[k] /= (n * n);\n }\n return f;\n}\n\ntemplate \nstd::vector 
acceleration_filter(std::size_t n, std::size_t p, int64_t s)\n{\n BOOST_MATH_ASSERT_MSG(p <= 2*n, \"Approximation order must be <= 2*n\");\n BOOST_MATH_ASSERT_MSG(p > 2, \"Approximation order must be > 2\");\n\n std::vector coeffs(p+1, std::numeric_limits::quiet_NaN());\n Real sn = Real(s) / Real(n);\n auto dlp = discrete_legendre(n, sn);\n coeffs[0] = 0;\n coeffs[1] = 0;\n for (std::size_t l = 2; l < p + 1; ++l)\n {\n coeffs[l] = dlp.next_dbl_prime()/ dlp.norm_sq(l);\n }\n\n std::vector f(2*n + 1, 0);\n for (std::size_t k = 0; k < f.size(); ++k)\n {\n Real j = Real(k) - Real(n);\n Real arg = j/Real(n);\n dlp = discrete_legendre(n, arg);\n for (std::size_t l = 2; l <= p; ++l)\n {\n f[k] += coeffs[l]*dlp.next();\n }\n f[k] /= (n * n * n);\n }\n return f;\n}\n\n\n} // namespace detail\n\ntemplate \nclass discrete_lanczos_derivative {\npublic:\n discrete_lanczos_derivative(Real const & spacing,\n std::size_t n = 18,\n std::size_t approximation_order = 3)\n : m_dt{spacing}\n {\n static_assert(!std::is_integral_v,\n \"Spacing must be a floating point type.\");\n BOOST_MATH_ASSERT_MSG(spacing > 0,\n \"Spacing between samples must be > 0.\");\n\n if constexpr (order == 1)\n {\n BOOST_MATH_ASSERT_MSG(approximation_order <= 2 * n,\n \"The approximation order must be <= 2n\");\n BOOST_MATH_ASSERT_MSG(approximation_order >= 2,\n \"The approximation order must be >= 2\");\n\n if constexpr (std::is_same_v || std::is_same_v)\n {\n auto interior = detail::interior_velocity_filter(n, approximation_order);\n m_f.resize(interior.size());\n for (std::size_t j = 0; j < interior.size(); ++j)\n {\n m_f[j] = static_cast(interior[j])/m_dt;\n }\n }\n else\n {\n m_f = detail::interior_velocity_filter(n, approximation_order);\n for (auto & x : m_f)\n {\n x /= m_dt;\n }\n }\n\n m_boundary_filters.resize(n);\n // This for loop is a natural candidate for parallelization.\n // But does it matter? 
Probably not.\n for (std::size_t i = 0; i < n; ++i)\n {\n if constexpr (std::is_same_v || std::is_same_v)\n {\n int64_t s = static_cast(i) - static_cast(n);\n auto bf = detail::boundary_velocity_filter(n, approximation_order, s);\n m_boundary_filters[i].resize(bf.size());\n for (std::size_t j = 0; j < bf.size(); ++j)\n {\n m_boundary_filters[i][j] = static_cast(bf[j])/m_dt;\n }\n }\n else\n {\n int64_t s = static_cast(i) - static_cast(n);\n m_boundary_filters[i] = detail::boundary_velocity_filter(n, approximation_order, s);\n for (auto & bf : m_boundary_filters[i])\n {\n bf /= m_dt;\n }\n }\n }\n }\n else if constexpr (order == 2)\n {\n // High precision isn't warranted for small p; only for large p.\n // (The computation appears stable for large n.)\n // But given that the filters are reusable for many vectors,\n // it's better to do a high precision computation and then cast back,\n // since the resulting cost is a factor of 2, and the cost of the filters not working is hours of debugging.\n if constexpr (std::is_same_v || std::is_same_v)\n {\n auto f = detail::acceleration_filter(n, approximation_order, 0);\n m_f.resize(n+1);\n for (std::size_t i = 0; i < m_f.size(); ++i)\n {\n m_f[i] = static_cast(f[i+n])/(m_dt*m_dt);\n }\n m_boundary_filters.resize(n);\n for (std::size_t i = 0; i < n; ++i)\n {\n int64_t s = static_cast(i) - static_cast(n);\n auto bf = detail::acceleration_filter(n, approximation_order, s);\n m_boundary_filters[i].resize(bf.size());\n for (std::size_t j = 0; j < bf.size(); ++j)\n {\n m_boundary_filters[i][j] = static_cast(bf[j])/(m_dt*m_dt);\n }\n }\n }\n else\n {\n // Given that the purpose is denoising, for higher precision calculations,\n // the default precision should be fine.\n auto f = detail::acceleration_filter(n, approximation_order, 0);\n m_f.resize(n+1);\n for (std::size_t i = 0; i < m_f.size(); ++i)\n {\n m_f[i] = f[i+n]/(m_dt*m_dt);\n }\n m_boundary_filters.resize(n);\n for (std::size_t i = 0; i < n; ++i)\n {\n int64_t s = 
static_cast(i) - static_cast(n);\n m_boundary_filters[i] = detail::acceleration_filter(n, approximation_order, s);\n for (auto & bf : m_boundary_filters[i])\n {\n bf /= (m_dt*m_dt);\n }\n }\n }\n }\n else\n {\n BOOST_MATH_ASSERT_MSG(false, \"Derivatives of order 3 and higher are not implemented.\");\n }\n }\n\n Real get_spacing() const\n {\n return m_dt;\n }\n\n template\n Real operator()(RandomAccessContainer const & v, std::size_t i) const\n {\n static_assert(std::is_same_v,\n \"The type of the values in the vector provided does not match the type in the filters.\");\n\n BOOST_MATH_ASSERT_MSG(std::size(v) >= m_boundary_filters[0].size(),\n \"Vector must be at least as long as the filter length\");\n\n if constexpr (order==1)\n {\n if (i >= m_f.size() - 1 && i <= std::size(v) - m_f.size())\n {\n // The filter has length >= 1:\n Real dvdt = m_f[1] * (v[i + 1] - v[i - 1]);\n for (std::size_t j = 2; j < m_f.size(); ++j)\n {\n dvdt += m_f[j] * (v[i + j] - v[i - j]);\n }\n return dvdt;\n }\n\n // m_f.size() = N+1\n if (i < m_f.size() - 1)\n {\n auto &bf = m_boundary_filters[i];\n Real dvdt = bf[0]*v[0];\n for (std::size_t j = 1; j < bf.size(); ++j)\n {\n dvdt += bf[j] * v[j];\n }\n return dvdt;\n }\n\n if (i > std::size(v) - m_f.size() && i < std::size(v))\n {\n int k = std::size(v) - 1 - i;\n auto &bf = m_boundary_filters[k];\n Real dvdt = bf[0]*v[std::size(v)-1];\n for (std::size_t j = 1; j < bf.size(); ++j)\n {\n dvdt += bf[j] * v[std::size(v) - 1 - j];\n }\n return -dvdt;\n }\n }\n else if constexpr (order==2)\n {\n if (i >= m_f.size() - 1 && i <= std::size(v) - m_f.size())\n {\n Real d2vdt2 = m_f[0]*v[i];\n for (std::size_t j = 1; j < m_f.size(); ++j)\n {\n d2vdt2 += m_f[j] * (v[i + j] + v[i - j]);\n }\n return d2vdt2;\n }\n\n // m_f.size() = N+1\n if (i < m_f.size() - 1)\n {\n auto &bf = m_boundary_filters[i];\n Real d2vdt2 = bf[0]*v[0];\n for (std::size_t j = 1; j < bf.size(); ++j)\n {\n d2vdt2 += bf[j] * v[j];\n }\n return d2vdt2;\n }\n\n if (i > std::size(v) - 
m_f.size() && i < std::size(v))\n {\n int k = std::size(v) - 1 - i;\n auto &bf = m_boundary_filters[k];\n Real d2vdt2 = bf[0] * v[std::size(v) - 1];\n for (std::size_t j = 1; j < bf.size(); ++j)\n {\n d2vdt2 += bf[j] * v[std::size(v) - 1 - j];\n }\n return d2vdt2;\n }\n }\n\n // OOB access:\n std::string msg = \"Out of bounds access in Lanczos derivative.\";\n msg += \"Input vector has length \" + std::to_string(std::size(v)) + \", but user requested access at index \" + std::to_string(i) + \".\";\n throw std::out_of_range(msg);\n return std::numeric_limits::quiet_NaN();\n }\n\n template\n void operator()(RandomAccessContainer const & v, RandomAccessContainer & w) const\n {\n static_assert(std::is_same_v,\n \"The type of the values in the vector provided does not match the type in the filters.\");\n if (&w[0] == &v[0])\n {\n throw std::logic_error(\"This transform cannot be performed in-place.\");\n }\n\n if (std::size(v) < m_boundary_filters[0].size())\n {\n std::string msg = \"The input vector must be at least as long as the filter length. 
\";\n msg += \"The input vector has length = \" + std::to_string(std::size(v)) + \", the filter has length \" + std::to_string(m_boundary_filters[0].size());\n throw std::length_error(msg);\n }\n\n if (std::size(w) < std::size(v))\n {\n std::string msg = \"The output vector (containing the derivative) must be at least as long as the input vector.\";\n msg += \"The output vector has length = \" + std::to_string(std::size(w)) + \", the input vector has length \" + std::to_string(std::size(v));\n throw std::length_error(msg);\n }\n\n if constexpr (order==1)\n {\n for (std::size_t i = 0; i < m_f.size() - 1; ++i)\n {\n auto &bf = m_boundary_filters[i];\n Real dvdt = bf[0] * v[0];\n for (std::size_t j = 1; j < bf.size(); ++j)\n {\n dvdt += bf[j] * v[j];\n }\n w[i] = dvdt;\n }\n\n for(std::size_t i = m_f.size() - 1; i <= std::size(v) - m_f.size(); ++i)\n {\n Real dvdt = m_f[1] * (v[i + 1] - v[i - 1]);\n for (std::size_t j = 2; j < m_f.size(); ++j)\n {\n dvdt += m_f[j] *(v[i + j] - v[i - j]);\n }\n w[i] = dvdt;\n }\n\n\n for(std::size_t i = std::size(v) - m_f.size() + 1; i < std::size(v); ++i)\n {\n int k = std::size(v) - 1 - i;\n auto &f = m_boundary_filters[k];\n Real dvdt = f[0] * v[std::size(v) - 1];;\n for (std::size_t j = 1; j < f.size(); ++j)\n {\n dvdt += f[j] * v[std::size(v) - 1 - j];\n }\n w[i] = -dvdt;\n }\n }\n else if constexpr (order==2)\n {\n // m_f.size() = N+1\n for (std::size_t i = 0; i < m_f.size() - 1; ++i)\n {\n auto &bf = m_boundary_filters[i];\n Real d2vdt2 = 0;\n for (std::size_t j = 0; j < bf.size(); ++j)\n {\n d2vdt2 += bf[j] * v[j];\n }\n w[i] = d2vdt2;\n }\n\n for (std::size_t i = m_f.size() - 1; i <= std::size(v) - m_f.size(); ++i)\n {\n Real d2vdt2 = m_f[0]*v[i];\n for (std::size_t j = 1; j < m_f.size(); ++j)\n {\n d2vdt2 += m_f[j] * (v[i + j] + v[i - j]);\n }\n w[i] = d2vdt2;\n }\n\n for (std::size_t i = std::size(v) - m_f.size() + 1; i < std::size(v); ++i)\n {\n int k = std::size(v) - 1 - i;\n auto &bf = m_boundary_filters[k];\n Real d2vdt2 
= bf[0] * v[std::size(v) - 1];\n for (std::size_t j = 1; j < bf.size(); ++j)\n {\n d2vdt2 += bf[j] * v[std::size(v) - 1 - j];\n }\n w[i] = d2vdt2;\n }\n }\n }\n\n template\n RandomAccessContainer operator()(RandomAccessContainer const & v) const\n {\n RandomAccessContainer w(std::size(v));\n this->operator()(v, w);\n return w;\n }\n\n\n // Don't copy; too big.\n discrete_lanczos_derivative( const discrete_lanczos_derivative & ) = delete;\n discrete_lanczos_derivative& operator=(const discrete_lanczos_derivative&) = delete;\n\n // Allow moves:\n discrete_lanczos_derivative(discrete_lanczos_derivative&&) = default;\n discrete_lanczos_derivative& operator=(discrete_lanczos_derivative&&) = default;\n\nprivate:\n std::vector m_f;\n std::vector> m_boundary_filters;\n Real m_dt;\n};\n\n} // namespaces\n#endif\n", "meta": {"hexsha": "4fc7954552d412377df37b4436482bf65f008923", "size": 20395, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/math/differentiation/lanczos_smoothing.hpp", "max_stars_repo_name": "oleg-alexandrov/math", "max_stars_repo_head_hexsha": "2137c31eb8e52129d997a76b893f71c1da0ccc5f", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 233.0, "max_stars_repo_stars_event_min_datetime": "2015-01-12T19:26:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T09:21:47.000Z", "max_issues_repo_path": "include/boost/math/differentiation/lanczos_smoothing.hpp", "max_issues_repo_name": "oleg-alexandrov/math", "max_issues_repo_head_hexsha": "2137c31eb8e52129d997a76b893f71c1da0ccc5f", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 626.0, "max_issues_repo_issues_event_min_datetime": "2015-02-05T18:12:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-20T13:19:18.000Z", "max_forks_repo_path": "include/boost/math/differentiation/lanczos_smoothing.hpp", "max_forks_repo_name": "oleg-alexandrov/math", "max_forks_repo_head_hexsha": "2137c31eb8e52129d997a76b893f71c1da0ccc5f", "max_forks_repo_licenses": 
["BSL-1.0"], "max_forks_count": 243.0, "max_forks_repo_forks_event_min_datetime": "2015-01-17T17:46:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T12:56:26.000Z", "avg_line_length": 34.9828473413, "max_line_length": 159, "alphanum_fraction": 0.461387595, "num_tokens": 5730, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7931059609645724, "lm_q2_score": 0.6297746143530797, "lm_q1q2_score": 0.4994780007075923}} {"text": "//\n// Created by Anshuman Mishra on 11/26/19.\n//\n\n#include \"headers/Policy.h\"\n#include \n#include \n#include \n#include \n#include \"headers/MathUtils.h\"\n\nusing namespace std;\nusing namespace Eigen;\n\nPolicy::Policy (int numActions_, int stateTerms_, unsigned seed){\n// cout << \"Initializing without theta\" << endl;\n// cout << \"Theta before \" << theta.size() << endl;\n // init theta\n numActions = numActions_;\n stateTerms = stateTerms_;\n theta = MatrixXd(stateTerms, numActions);\n // unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();\n gen = default_random_engine (seed);\n// cout << \"Theta after \" << theta.size() << endl;\n}\n\nPolicy::Policy (VectorXd& theta_init, int numActions_, int stateTerms_, unsigned seed){\n numActions = numActions_;\n stateTerms = stateTerms_;\n// cout << \"Initializing with theta \" << theta_init.size() << endl;\n// cout << \"Theta before \" << theta.size() << endl;\n theta = MatrixXd::Map(theta_init.data(), stateTerms, numActions);\n// cout << \"Theta after \" << theta.size() << endl;\n gen = default_random_engine (seed);\n}\n\nPolicy::Policy(Policy pi, unsigned seed){\n numActions = pi.getNumActions();\n stateTerms = pi.getStateTerms();\n theta = MatrixXd::Map(pi.getTheta().data(), stateTerms, numActions);\n gen = default_random_engine (seed);\n}\n\nvoid Policy::setTheta(const VectorXd& newTheta) {\n// cout << \"Setting theta \" << newTheta.size() << endl;\n// cout << \"Theta before \" << theta.size() << endl;\n theta = 
MatrixXd::Map(newTheta.data(), theta.rows(), theta.cols());\n// cout << \"Theta after \" << theta.size() << endl;\n}\n\nVectorXd Policy::getTheta() const{\n return VectorXd::Map(theta.data(), theta.size());\n}\n\nint Policy::getNumActions() const{\n return numActions;\n}\n\nint Policy::getStateTerms() const{\n return stateTerms;\n}\n\n// Softmax Action Selection with Linear Function Approximation\nint Policy::getAction (const VectorXd& phi){\n// cout << \"Getting Action\" << endl;\n// cout << phi.cols() << \"x\" << phi.rows() << \"dot\" << theta.rows() << \"x\" << theta.cols() << endl;\n// cout << \"Phi \" << phi.transpose() << endl;\n// cout << \"Theta\\n \" << theta << endl;\n VectorXd dot = phi.transpose()*theta;\n VectorXd q = exp(dot.array() - dot.maxCoeff());\n// cout << \"q \" << q << endl;\n\n discrete_distribution dist(q.data(), q.data() + q.rows() * q.cols());\n int action = dist(gen);\n// cout << \"Action:\" << action << endl;\n return action;\n}\n\ndouble Policy::getActionProbability (const VectorXd& phi, int action) const {\n// cout << \"Getting Action Probability\";\n// cout << phi.cols() << \"x\" << phi.rows() << \"dot\" << theta.rows() << \"x\" << theta.cols() << endl;\n// cout << \"Phi \" << phi.transpose() << endl;\n// cout << \"Theta\\n\" << theta << endl;\n VectorXd dot = phi.transpose()*theta;\n// cout << \"dot \" << dot.transpose() << endl;\n VectorXd q = exp(dot.array() - dot.maxCoeff());\n\n discrete_distribution dist(q.data(), q.data() + q.rows() * q.cols());\n double p = dist.probabilities()[action];\n if (isnan(p))\n p = 1.0;\n// if (isnan(p)){\n// cout << \"Action Probability is nan\" << endl;\n// cout << \"phi: \" << phi.transpose() << endl;\n// cout << \"Q: \" << q.transpose() << endl;\n// cout << \"action: \" << action << endl;\n// getchar();\n// }\n// cout << \"p=\" << p << endl;\n return p;\n}", "meta": {"hexsha": "0501c8650e211ede44d62cbc76c4dac1e08d48c5", "size": 3366, "ext": "cpp", "lang": "C++", "max_stars_repo_path": 
"Project/Policy.cpp", "max_stars_repo_name": "anshuman1811/cs687-reinforcementlearning", "max_stars_repo_head_hexsha": "cf30cc0ab2b0e515cd4b643fc55c60cc5f38a481", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Project/Policy.cpp", "max_issues_repo_name": "anshuman1811/cs687-reinforcementlearning", "max_issues_repo_head_hexsha": "cf30cc0ab2b0e515cd4b643fc55c60cc5f38a481", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Project/Policy.cpp", "max_forks_repo_name": "anshuman1811/cs687-reinforcementlearning", "max_forks_repo_head_hexsha": "cf30cc0ab2b0e515cd4b643fc55c60cc5f38a481", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3267326733, "max_line_length": 101, "alphanum_fraction": 0.6256684492, "num_tokens": 905, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.793105951184112, "lm_q2_score": 0.6297746213017459, "lm_q1q2_score": 0.49947800005913506}} {"text": "//==================================================================================================\n/*!\n @file\n\n @copyright 2016 NumScale SAS\n\n Distributed under the Boost Software License, Version 1.0.\n (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)\n*/\n//==================================================================================================\n#ifndef BOOST_SIMD_ARCH_COMMON_SIMD_FUNCTION_ILOG2_HPP_INCLUDED\n#define BOOST_SIMD_ARCH_COMMON_SIMD_FUNCTION_ILOG2_HPP_INCLUDED\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace boost { namespace simd { namespace ext\n{\n namespace bd = boost::dispatch;\n namespace bs = boost::simd;\n BOOST_DISPATCH_OVERLOAD_IF(ilog2_\n , (typename A0, typename X)\n , (detail::is_native)\n , bd::cpu_\n , bs::pack_, X>\n )\n {\n using result = bd::as_integer_t;\n BOOST_FORCEINLINE result operator()( const A0& a0) const BOOST_NOEXCEPT\n {\n return bs::exponent(a0);\n }\n };\n\n BOOST_DISPATCH_OVERLOAD_IF(ilog2_\n , (typename A0, typename X)\n , (detail::is_native)\n , bd::cpu_\n , bs::pack_, X>\n )\n {\n using result = bd::as_integer_t;\n BOOST_FORCEINLINE result operator()( const A0& a0) const BOOST_NOEXCEPT\n {\n return saturated_(dec)(sizeof(bd::scalar_of_t)*8-bs::clz(bitwise_cast(a0)));\n }\n };\n\n} } }\n\n\n#endif\n\n", "meta": {"hexsha": "a9806545bd47960b4811dc046fbdffbb8e77f432", "size": 2017, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "third_party/boost/simd/arch/common/simd/function/ilog2.hpp", "max_stars_repo_name": "xmar/pythran", "max_stars_repo_head_hexsha": "dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2018-02-20T11:21:12.000Z", "max_stars_repo_stars_event_max_datetime": 
"2019-11-12T13:45:09.000Z", "max_issues_repo_path": "third_party/boost/simd/arch/common/simd/function/ilog2.hpp", "max_issues_repo_name": "xmar/pythran", "max_issues_repo_head_hexsha": "dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/boost/simd/arch/common/simd/function/ilog2.hpp", "max_forks_repo_name": "xmar/pythran", "max_forks_repo_head_hexsha": "dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2017-12-12T12:29:52.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-08T15:55:25.000Z", "avg_line_length": 33.0655737705, "max_line_length": 100, "alphanum_fraction": 0.5493306891, "num_tokens": 440, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.793105951184112, "lm_q2_score": 0.6297746074044134, "lm_q1q2_score": 0.49947798903707796}} {"text": "#ifdef MEX\n\n#include \n#include \n#include \n#undef assert\n#define assert( isOK ) ( (isOK) ? 
(void)0 : (void) mexErrMsgTxt(C_STR(__FILE__<<\":\"<<__LINE__<<\": failed assertion `\"<<#isOK<<\"'\"<\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n\nvoid parse_rhs(\n const int nrhs, \n const mxArray *prhs[], \n Eigen::MatrixXd & P,\n Eigen::MatrixXd & V,\n Eigen::MatrixXi & F,\n igl::SignedDistanceType & type)\n{\n using namespace std;\n using namespace igl;\n using namespace igl::matlab;\n using namespace Eigen;\n mexErrMsgTxt(nrhs >= 3, \"The number of input arguments must be >=3.\");\n\n const int dim = mxGetN(prhs[0]);\n\n\n parse_rhs_double(prhs,P);\n parse_rhs_double(prhs+1,V);\n parse_rhs_index(prhs+2,F);\n\n mexErrMsgTxt(P.cols()==3 || P.cols()==2,\"P must be #P by (3|2)\");\n mexErrMsgTxt(V.cols()==3 || V.cols()==2,\"V must be #V by (3|2)\");\n mexErrMsgTxt(V.cols()==P.cols(),\"dim(V) must be dim(P)\");\n mexErrMsgTxt(F.cols()==V.cols(),\"F must be #F by dim(V)\");\n\n type = SIGNED_DISTANCE_TYPE_PSEUDONORMAL;\n {\n int i = 3;\n while(i g_tree;\n static igl::WindingNumberAABB<\n Eigen::RowVector3d,\n Eigen::MatrixXd,\n Eigen::MatrixXi> g_hier;\n static Eigen::MatrixXd g_FN,g_VN,g_EN;\n static Eigen::MatrixXi g_E;\n static Eigen::VectorXi g_EMAP;\n\nvoid mexFunction(\n int nlhs, mxArray *plhs[], \n int nrhs, const mxArray *prhs[])\n{\n using namespace std;\n using namespace Eigen;\n using namespace igl;\n using namespace igl::matlab;\n\n igl::matlab::MexStream mout; \n std::streambuf *outbuf = cout.rdbuf(&mout);\n //mexPrintf(\"Compiled at %s on %s\\n\",__TIME__,__DATE__);\n\n MatrixXd P,V,C,N;\n MatrixXi F;\n VectorXi I;\n VectorXd S;\n SignedDistanceType type;\n parse_rhs(nrhs,prhs,P,V,F,type);\n\n if(F.rows() > 0)\n {\n switch(V.cols())\n {\n case 2:\n {\n // Persistent data not supported for 2D\n signed_distance(P,V,F,type,S,I,C,N);\n break;\n }\n case 3:\n {\n if(g_sign_type != type || g_V != V || g_F != F)\n {\n g_V = V;\n g_F = F;\n g_sign_type = type;\n // 
Clear the tree\n g_tree.deinit();\n\n // Prepare distance computation\n g_tree.init(V,F);\n switch(type)\n {\n default:\n assert(false && \"Unknown SignedDistanceType\");\n case SIGNED_DISTANCE_TYPE_DEFAULT:\n case SIGNED_DISTANCE_TYPE_WINDING_NUMBER:\n g_hier.set_mesh(V,F);\n g_hier.grow();\n break;\n case SIGNED_DISTANCE_TYPE_PSEUDONORMAL:\n // \"Signed Distance Computation Using the Angle Weighted Pseudonormal\"\n // [B\u00e6rentzen & Aan\u00e6s 2005]\n per_face_normals(V,F,g_FN);\n per_vertex_normals(V,F,PER_VERTEX_NORMALS_WEIGHTING_TYPE_ANGLE,\n g_FN,g_VN);\n per_edge_normals(\n V,F,PER_EDGE_NORMALS_WEIGHTING_TYPE_UNIFORM,\n g_FN,g_EN,g_E,g_EMAP);\n break;\n }\n }\n\n N.resize(P.rows(),3);\n S.resize(P.rows(),1);\n I.resize(P.rows(),1);\n C.resize(P.rows(),3);\n //for(int p = 0;p()(from_type bar)\n *\n * Copyright 2013 Bruce Ide\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n */\n\n#include \"coordinates.hpp\"\n#include \n#include \n\n#ifndef _HPP_CONVERTS\n#define _HPP_CONVERTS\n\nnamespace fr {\n\n namespace coordinates {\n\n template \n struct converter {\n\n };\n\n /***************************************************************\n * Put convert to lat_long bits here\n */\n\n template <>\n struct converter {\n \n // Converting from lat_long to lat_long is kind of odd, but I'll\n // cover the case anyway\n template \n typename std::enable_if::value,lat_long>::type\n\toperator()(const convert_from &c)\n {\n\treturn c;\n }\n\n // ECEF to 
Latlong\n template \n typename std::enable_if::value,lat_long>::type \n operator()(const convert_from &xyz, const ellipsoid_parameters &e = WGS84_ELLIPSOID, double tolerance = 0.0000000001 )\n {\n\tdouble diff = 2 * tolerance;\n\tdouble t = e.ee * xyz.get_z();\n\tdouble n = 0.0;\n\tdouble nph = 0.0;\n\tdouble sinPhi = 0.0;\n\tdouble lat;\n\tdouble longitude;\n\tdouble alt;\n\n\tlongitude = atan2(xyz.get_y(), xyz.get_x()) * 180 / fr::constants::pi;\n\twhile(diff > tolerance) {\n\t double zT = xyz.get_z() + t;\n\t nph = sqrt(pow(xyz.get_x(), 2) + pow(xyz.get_y(), 2) + pow(zT, 2));\n\t sinPhi = zT / nph;\n\t n = e.ae / sqrt(1 - e.ee * sinPhi * sinPhi);\n\t double told = t;\n\t t = n * e.ee * sinPhi;\n\t diff = fabs(t - told);\n\t}\n\t\n\tlat = asin(sinPhi) * 180 / fr::constants::pi;\n\talt = nph - n;\n\tlat_long retval(lat, longitude, alt);\n\treturn retval;\n }\n\n // ecef_vel to lat_long (Loses velocity information)\n \n template \n typename std::enable_if::value,lat_long>::type\n operator()(const convert_from &c, const ellipsoid_parameters &e = WGS84_ELLIPSOID)\n {\n\tecef interim = converter()(c);\n\tlat_long retval = converter()(interim, e);\n\treturn retval;\n }\n\n // tod_eci_vel to latlong (Loses velocity information)\n template \n typename std::enable_if::value,lat_long>::type\n operator()(const convert_from &c, const double &t, const ellipsoid_parameters e = WGS84_ELLIPSOID)\n {\n\t// Convert from tod_eci to ecef_vel\n\tecef_vel interim = converter()(c,t);\n\t// Then use the function before this one to convert to lat/long\n\tlat_long retval = converter()(interim, e);\n\treturn retval;\n }\n \n };\n\n /*********************************************************\n * Put convert to ECEF bits here\n */\n\n template <>\n struct converter {\n \n // ECEF to ECEF conversion. 
\n template \n typename std::enable_if::value,ecef>::type\n operator()(const convert_from &c)\n {\n\treturn c;\n }\n\n // latlong to ecef\n template \n typename std::enable_if::value,ecef>::type\n operator()(const convert_from &c, const ellipsoid_parameters &e = WGS84_ELLIPSOID)\n {\n\tdouble x,y,z;\n\tconst double &pi = fr::constants::pi;\n\tdouble slat = sin(c.get_lat() * pi / 180);\n\tdouble clat = cos(c.get_lat() * pi / 180);\n\tdouble slon = sin(c.get_long() * pi / 180);\n\tdouble clon = cos(c.get_long() * pi / 180);\n\tdouble n = e.ae / sqrt(1.0 - e.ee * pow(slat, 2));\n\tx = (n + c.get_alt()) * clat * clon;\n\ty = (n + c.get_alt()) * clat * slon;\n\tz = (n * (1.0 - e.ee) + c.get_alt()) * slat;\n\tecef retval(x,y,z);\n\treturn retval;\n }\n\n // ecef_vel to ecef (loses velocity information)\n template \n typename std::enable_if::value,ecef>::type\n operator()(const convert_from &c)\n {\n\tecef retval(c.get_x(), c.get_y(), c.get_z());\n\treturn retval;\n }\n\n // tod_eci to ecef (Requires time coordinate was measured)\n template \n typename std::enable_if::value,ecef>::type\n operator()(const convert_from &c, const double &at_time)\n {\n\teci_to_ecef conversion_matrix(at_time);\n\tEigen::Vector3d c_vec = c.get_xyz();\n\tEigen::Matrix3d c_mat = conversion_matrix.get();\n\tEigen::Vector3d interim = c_mat * c_vec;\n\tecef retval(interim(0), interim(1), interim(2));\n\treturn retval;\n }\n \n };\n\n\n /**********************************************************\n * Put tod_eci stuff here\n */\n\n template<>\n struct converter {\n \n // ECI to ECI conversion\n\n template \n typename std::enable_if::value,tod_eci>::type\n operator()(const convert_from &c)\n {\n\treturn c;\n }\n\n // ecef to tod_eci (Requires time coordinate was measured)\n template \n typename std::enable_if::value,tod_eci>::type\n operator()(const convert_from &c, const double &time_at)\n {\n\tecef_to_eci conversion_matrix(time_at);\n\tEigen::Vector3d c_vec = c.get_xyz();\n\tEigen::Matrix3d c_mat = 
conversion_matrix.get();\n\tEigen::Vector3d interim = c_mat * c_vec;\n\ttod_eci retval(interim(0), interim(1), interim(2));\n\treturn retval;\n }\n\n };\n\n /***************************************************************\n * Put ecef_vel stuff here\n */\n\n template<>\n struct converter {\n // ecef_vel to ecef_vel conversion\n template \n typename std::enable_if::value,ecef_vel>::type\n operator()(const convert_from &c)\n {\n\treturn c;\n }\n\n // tod_eci_vel to ecef_vel (requires time component)\n template \n typename std::enable_if::value,ecef_vel>::type\n operator()(const convert_from &c, const double &time_at)\n {\n\tEigen::Matrix vec = c.get_vector();\n\teci_to_ecef cm(time_at);\n\tEigen::Matrix c_mat = cm.get_xyz_vel();\n\tEigen::Matrix interim = c_mat * vec;\n\tecef_vel retval(interim(0), interim(1), interim(2), interim(3), interim(4), interim(5));\n\treturn retval;\n } \n\n };\n\n /********************************************************************\n * Put tod_eci_vel stuff here\n */\n\n template<>\n struct converter {\n \n // From tod_eci_vel to tod_eci_vel\n template \n typename std::enable_if::value,tod_eci_vel>::type\n operator()(const convert_from &c)\n {\n\treturn c;\n }\n\n // from ecef_vel to tod_eci_vel\n template \n typename std::enable_if::value,tod_eci_vel>::type\n operator()(const convert_from &c, const double &t)\n {\n\tecef_to_eci cm(t);\n\tEigen::Matrix vec = c.get_vector();\n\tEigen::Matrix c_mat = cm.get_xyz_vel();\n\tEigen::Matrix interim = c_mat * vec;\n\ttod_eci_vel retval(interim(0), interim(1), interim(2), interim(3), interim(4), interim(5));\n\treturn retval;\t\t\t \n }\n\n };\n \n }\n\n}\n\n#endif\n", "meta": {"hexsha": "15c56101b788ada20faed677a07fe6d4d83f30a5", "size": 8054, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "converts.hpp", "max_stars_repo_name": "FlyingRhenquest/coordinates", "max_stars_repo_head_hexsha": "b6558b7e49e9927b4867456f4ce9fd81ec8bab81", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 
null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "converts.hpp", "max_issues_repo_name": "FlyingRhenquest/coordinates", "max_issues_repo_head_hexsha": "b6558b7e49e9927b4867456f4ce9fd81ec8bab81", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-03-30T12:28:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-10T06:36:53.000Z", "max_forks_repo_path": "converts.hpp", "max_forks_repo_name": "FlyingRhenquest/coordinates", "max_forks_repo_head_hexsha": "b6558b7e49e9927b4867456f4ce9fd81ec8bab81", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2018-02-08T16:17:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-14T14:48:59.000Z", "avg_line_length": 30.6235741445, "max_line_length": 124, "alphanum_fraction": 0.6295008691, "num_tokens": 2124, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7931059414036511, "lm_q2_score": 0.6297746074044134, "lm_q1q2_score": 0.49947798287759204}} {"text": "\n#include \n#include \n#include \n\n#include \"rlbot/bot.h\"\n#include \"rlbot/rlbot_generated.h\"\n#include \"rlbot/scopedrenderer.h\"\n\n#include \"util.h\"\n\n\n\n\nusing namespace Eigen;\n\n\n\nnamespace util {\n\n\tconst float M_PI = 3.14159265;\n\n\tVector3f convert(const rlbot::flat::Vector3& vec) {\n\t\treturn { vec.x(), vec.y(), vec.z() };\n\t}\n\n\tVector3f convert(const rlbot::flat::Rotator& vec) {\n\t\t\n\t\treturn { vec.pitch(), vec.yaw(), vec.roll() };\n\t}\n\n\trlbot::flat::Vector3 convert(const Vector3f& vec) {\n\t\treturn rlbot::flat::Vector3{ vec[0], vec[1], vec[2] };\n\t}\n\n\tPhysics::Physics(const rlbot::flat::Physics& phys) :\n\t\tlocation(convert(*phys.location())),\n\t\tvelocity(convert(*phys.velocity())),\n\t\trotation(convert(*phys.rotation())),\n\t\tangularVelocity(convert(*phys.angularVelocity())) {}\n\n\n\tPhysics::Physics(const BallPhysics& bp) :\n\t\tlocation(bp.location),\n\t\tvelocity(bp.velocity),\n\t\trotation(bp.rotation),\n\t\tangularVelocity(bp.angularVelocity) {}\n\n\t\n\n\tBallPhysics::BallPhysics(const rlbot::flat::Physics& phys) :\n\t\tlocation(convert(*phys.location())),\n\t\tvelocity(convert(*phys.velocity())),\n\t\trotation(Vector3f::Zero()),\n\t\tangularVelocity(convert(*phys.angularVelocity())) {}\n\n\n\tVector3f Physics::forward() const {\n\n\t\tfloat\tsp = sin(rotation.pitch),\n\t\t\t\tcp = cos(rotation.pitch),\n\t\t\t\tsr = sin(rotation.roll),\n\t\t\t\tcr = cos(rotation.roll),\n\t\t\t\tsy = sin(rotation.yaw),\n\t\t\t\tcy = cos(rotation.yaw);\n\n\n\t\treturn { cp * cy, cp * sy, sp };\n\t}\n\n\tVector3f Physics::right() const {\n\t\t//pitch roll yaw\n\t\tfloat\tsp = sin(rotation.pitch),\n\t\t\t\tcp = cos(rotation.pitch),\n\t\t\t\tsr = sin(rotation.roll),\n\t\t\t\tcr = cos(rotation.roll),\n\t\t\t\tsy = sin(rotation.yaw),\n\t\t\t\tcy = cos(rotation.yaw);\n\n\n\t\treturn { cy * sp * sr - cr * sy, 
sy * sp * sr + cr * cy, -cp * sr };\n\n\t}\n\n\tVector3f Physics::up() const {\n\n\t\tfloat\tsp = sin(rotation.pitch),\n\t\t\t\tcp = cos(rotation.pitch),\n\t\t\t\tsr = sin(rotation.roll),\n\t\t\t\tcr = cos(rotation.roll),\n\t\t\t\tsy = sin(rotation.yaw),\n\t\t\t\tcy = cos(rotation.yaw);\n\n\t\treturn { -cr * cy * sp - sr * sy, -cr * sy * sp + sr * cy, cp * cr };\n\t}\n\n\tRotation::operator Vector3f() const {\n\t\treturn Vector3f(pitch, yaw, roll);\n\t}\n\n\tRotation::Rotation(const Vector3f& rot) :\n\t\tpitch(rot[0]),\n\t\tyaw(rot[1]),\n\t\troll(rot[2]) {\n\t}\n\n\tRotation::Rotation(const rlbot::flat::Rotator& rot) :\n\t\tpitch(rot.pitch()),\n\t\tyaw(rot.yaw()),\n\t\troll(rot.roll()) {}\n\t\t\n\n\tScoreInfo::ScoreInfo(const rlbot::flat::ScoreInfo& inf) :\n\t\tscore(inf.score()),\n\t\tgoals(inf.goals()),\n\t\townGoals(inf.ownGoals()),\n\t\tassists(inf.assists()),\n\t\tsaves(inf.saves()),\n\t\tshots(inf.shots()),\n\t\tdemolitions(inf.demolitions()) {}\n\n\tBoxShape::BoxShape(const rlbot::flat::BoxShape& box) :\n\t\tlength(box.length()),\n\t\twidth(box.width()),\n\t\theight(box.height()) {}\n\n\tCar::Car(const rlbot::flat::PlayerInfo& info) :\n\t\tphysics(*info.physics()),\n\t\tscoreInfo(*info.scoreInfo()),\n\t\tisDemolished(info.isDemolished()),\n\t\thasWheelContact(info.hasWheelContact()),\n\t\tisSupersonic(info.isSupersonic()),\n\t\tisBot(info.isBot()),\n\t\tjumped(info.jumped()),\n\t\tdoubleJumped(info.doubleJumped()),\n\t\tname((*info.name()).str()),\n\t\tteam(info.team()),\n\t\tboost(info.boost()),\n\t\thitbox(*info.hitbox()),\n\t\thitboxOffset(convert(*info.hitboxOffset())) {\n\t}\n\n\tTouch::Touch(const rlbot::flat::Touch& t) :\n\t\tgameSeconds(t.gameSeconds()),\n\t\tlocation(convert(*t.location())),\n\t\tnormal(convert(*t.normal())),\n\t\tteam(t.team()),\n\t\tplayerIndex(t.playerIndex()) {}\n\n\tDropShotBallInfo::DropShotBallInfo(const rlbot::flat::DropShotBallInfo& drop) 
:\n\t\tabsorbedForce(drop.absorbedForce()),\n\t\tdamageIndex(drop.damageIndex()),\n\t\tforceAccumRecent(drop.forceAccumRecent()) {}\n\n\tCollisionShape::CollisionShape(rlbot::flat::CollisionShape coll) : type(0) {} //TODO FIX TYPE\n\n\tBall::Ball(const rlbot::flat::BallInfo& ball) :\n\t\tphysics(*ball.physics()),\n\t\tlatestTouch(*ball.latestTouch()),\n\t\tdropShotInfo(*ball.dropShotInfo()),\n\t\tshape_type(ball.shape_type()) {}\n\n\tPredictionSlice::PredictionSlice(const rlbot::flat::PredictionSlice* slice) :\n\t\tgameSeconds(slice->gameSeconds()),\n\t\tphysics(*slice->physics()) {}\n\n\tBallPrediction::BallPrediction(const rlbot::flat::BallPrediction& bp){\n\t\tint max = bp.slices()->size();\n\t\tfor (int i = 0; i < max; ++i) {\n\t\t\tpredictionSlices.push_back(bp.slices()->Get(i));\n\n\t\t}\n\t}\n\n\n\tVector3f Car::other_goal() const{\n\t\tfloat t = team == 1 ? 1 : -1;\n\t\treturn Vector3f{ 0, -5120 * t, 92.75 };\n\t}\n\n\tVector3f Car::own_goal() const{\n\t\tfloat t = team == 1 ? 1 : -1;\n\t\treturn Vector3f{ 0, 5120 * t, 92.75 };\n\t}\n\n\t\n\n\tVector3f to_local(Car c, Vector3f target) {\n\n\t\t//TODO MAKE ACTUAL MATRIX IMPLENTATION INSTEAD OF WHATEVER THIS IS\n\n\t\t\n\t\tVector3f local = {\n\t\t\t(target - c.physics.location).dot(c.physics.forward()),\n\t\t\t(target - c.physics.location).dot(c.physics.right()),\n\t\t\t(target - c.physics.location).dot(c.physics.up())\n\t\t};\n\n\t\t\n\t\treturn local;\n\t}\n\n\ttemplate\n\tT map(T value, T old_min, T old_max, T new_min, T new_max) {\n\t\treturn (value - old_min) * (new_max - new_min) / (old_max - old_min) + new_min;\n\t}\n\n\ttemplate\n\tT clamp(T value, T min, T max) {\n\t\tif (value < min) return min;\n\t\tif (value > max) return max;\n\t\treturn value;\n\t}\n\n\trlbot::Controller optimalGroundControl(const Car& car, const std::vector& path) {\n\n\t\tbool debug = 1;\n\n\t\trlbot::Controller controller{ 0 };\n\n\t\t//take average of first 20% of the path (10)\n\t\tVector3f aim = { 0, 0, 0 };\n\t\tint len 
= std::min((int)path.size(), util::NUM_POINTS/5);\n\t\tfor (auto i = 0; i < len; ++i) aim += path[i];\n\t\taim /= len;\n\n\t\t//aim = path[path.size() / 2];\n\n\t\t//aim = path[0];\n\n\t\trlbot::ScopedRenderer renderer(\"OPTIMAL GROUND CONTROL \" + std::to_string(car.team));\n\n\t\t//Vector3f local = to_local(car, aim);\n\n\t\tfloat angle = atan2(aim.y() - car.physics.location.y(), \n\t\t\t\t\t\t\taim.x() - car.physics.location.x());\n\n\n\t\tangle -= car.physics.rotation.yaw;\n\n\t\n\n\t\tif (angle < -M_PI) angle += 2 * M_PI;\n\t\tif (angle > M_PI) angle -= 2 * M_PI;\n\n\t\tfloat steer = angle/M_PI;\n\n\t\tsteer *= 10;\n\t\tsteer = clamp(steer, -1, 1);\n\n\t\t//steer = steer > 0 ? 1 : -1;\n\t\t//steer = clamp(steer, -1, 1);\n\n\t\tif (debug && car.team == 0) {\n\t\t\trenderer.DrawRect3D(rlbot::Color::black, convert(aim),\n\t\t\t\t20, 20, true, true);\n\t\t}\n\n\n\t\t//if steer is way too big, slow down\n\t\t\n\t\t//bool is_going_forward = ( car.physics.forward().dot(car.physics.velocity) > 0);\n\t\t/*\n\t\tif (is_going_forward && path[path.size() -1].z() > 400) {\n\t\t\tcontroller.throttle = 1 - 2 * std::abs(steer);\n\t\t}\n\t\telse {\n\t\t\tcontroller.throttle = 1;\n\t\t}*/\n\n\t\tcontroller.throttle = clamp(1.3 - std::abs(steer), 0, 1);\n\n\t\tif (steer > 0.05) steer = 1;\n\t\tif (steer < -0.05) steer = -1;\n\n\t\tcontroller.steer = steer;\n\n\t\t\n\n\t\tif (std::abs(steer) < 0.05 && car.hasWheelContact) controller.boost = 1;\n\n\n\t\t//if car is stuck\n\t\t\n\n\t\tVector3f origin = { 0, 0, 92.75 };\n\n\t\t// If ball is at center(kickoff)\n\t\tif ((path[path.size() - 1] - origin).norm() < 5) {\n\t\t\tcontroller.boost = 1;\n\t\t\tcontroller.steer = controller.steer > 0 ? 
1 : -1;\n\t\t}//else if (car.physics.velocity.norm() < 5) controller.jump = 1;\n\n\n\t\t//jump if car is close to ball\n\t\tif ((path[path.size() - 1] - path[0]).norm() < 200 && car.hasWheelContact && path[path.size() - 1].z() < 200 && std::abs(controller.steer) < 0.1 ) {\n\t\t\tcontroller.jump = 1;\n\t\t}\n\n\t\t// If car is in the air\n\t\tif (!car.hasWheelContact) {\n\t\t\tif ((path[path.size() - 1] - path[0]).norm() < 200) {\n\t\t\t\tcontroller.pitch = 1;\n\t\t\t\t\n\t\t\t\tcontroller.jump = 1;\n\t\t\t}\n\t\t\telse {\n\t\t\t\tcontroller.pitch = clamp(-2*car.physics.rotation.pitch, -1, 1);\n\t\t\t\tcontroller.yaw = clamp(2*controller.steer, -1, 1);\n\t\t\t\tcontroller.roll = clamp(-2*car.physics.rotation.roll, -1, 1);\n\t\t\t}\n\t\t}\n\n\t\t//if target is in the air slow down\n\t\tVector3f target = path[path.size() - 1];\n\t\ttarget.z() = 0;\n\n\t\tVector3f now = car.physics.location;\n\t\tnow.z() = 0;\n\n\t\tfloat groundDist = (target - now).norm();\n\n\t\t//if (path[path.size() - 1].z() > 300 && groundDist < 500 && std::abs(controller.steer) < 0.1) \n\t\t\t//controller.throttle = -1 * is_going_forward;\n\n\n\t\tif (debug && car.team == 0) {\n\t\t\trenderer.DrawString2D(\t\"boost: \" + std::to_string(controller.boost) + \"\\n\" +\n\t\t\t\t\t\t\t\t\t\"handbrake: \" + std::to_string(controller.handbrake) + \"\\n\" +\n\t\t\t\t\t\t\t\t\t\"jump: \" + std::to_string(controller.jump) + \"\\n\" +\n\t\t\t\t\t\t\t\t\t\"steer: \" + std::to_string(controller.steer) + \"\\n\" +\n\t\t\t\t\t\t\t\t\t\"throttle: \" + std::to_string(controller.throttle) + \"\\n\" +\n\t\t\t\t\t\t\t\t\t\"pitch: \" + std::to_string(controller.pitch) + \"\\n\" +\n\t\t\t\t\t\t\t\t\t\"roll: \" + std::to_string(controller.roll) + \"\\n\" +\n\t\t\t\t\t\t\t\t\t\"yaw: \" + std::to_string(controller.yaw) + \"\\n\",\n\t\t\t\t\t\t\t\t\trlbot::Color::green,\n\t\t\t\t\t\t\t\t\trlbot::flat::Vector3{ 10, 200, 0 },\n\t\t\t\t\t\t\t\t\t2, 2);\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t}\n\n\t\treturn 
controller;\n\n\t}\n\n\n}//namespace util", "meta": {"hexsha": "1f26204b1a4bcdf60cc123a9973ae8893169294e", "size": 8636, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "util/util.cpp", "max_stars_repo_name": "steinraf/badbotcpp", "max_stars_repo_head_hexsha": "6b517bd0c9ce1f1b717dbfdd790e627434259219", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-04-09T23:13:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-09T23:13:03.000Z", "max_issues_repo_path": "util/util.cpp", "max_issues_repo_name": "steinraf/badbotcpp", "max_issues_repo_head_hexsha": "6b517bd0c9ce1f1b717dbfdd790e627434259219", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util/util.cpp", "max_forks_repo_name": "steinraf/badbotcpp", "max_forks_repo_head_hexsha": "6b517bd0c9ce1f1b717dbfdd790e627434259219", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.816091954, "max_line_length": 150, "alphanum_fraction": 0.6218156554, "num_tokens": 2684, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8824278726384089, "lm_q2_score": 0.5660185351961015, "lm_q1q2_score": 0.4994705318870042}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"common.hpp\"\n#include \"fft.hpp\"\n#include \"spectrum_image.hpp\"\n#include \"segment_envelope.hpp\"\n\nspectrum_image::spectrum_image(\n const window_list_t &window,\n const std::vector< int16_t > &audio,\n int x_,\n uint32_t sample_rate_,\n uint32_t resolution_,\n uint32_t scale_,\n int weight,\n unsigned int interval\n) : x( x_ ), resolution( resolution_ ), scale( scale_ ), sample_rate( sample_rate_ ) {\n a = powf( 2.f, float( scale ) + weight );\n b = float( interval ) * float( scale );\n const auto converted = fftref( window, audio, resolution, a, b, x );\n pixels_begin = converted.second.get();\n pixels = converted.second;\n envelope = std::move( converted.first );\n const auto delay_end = std::find_if( envelope.begin(), envelope.end(), []( float v ) { return v != 0.f; } );\n const auto delay_size = std::distance( envelope.begin(), delay_end );\n pixels_begin += delay_size * x;\n envelope.erase( envelope.begin(), delay_end );\n y = envelope.size();\n const auto tail_blank_end = std::find_if( envelope.rbegin(), envelope.rend(), []( float v ) { return v != 0.f; } );\n const auto tail_blank_size = std::distance( envelope.rbegin(), tail_blank_end );\n envelope.resize( envelope.size() - tail_blank_size );\n y = envelope.size();\n std::tie( delay, attack, release ) = segment_envelope( envelope, a, b );\n delay_time = ( a * delay * delay + b * delay ) * tinyfm3::delta;\n attack_time = ( a * attack * attack + b * attack ) * tinyfm3::delta;\n release_time = ( a * release * release + b * release ) * tinyfm3::delta;\n total_time = ( a * envelope.size() * envelope.size() + b * envelope.size() ) * tinyfm3::delta;\n std::cout << __FILE__ << \" \" << __LINE__ << \" \" << delay_time << \" \" << attack_time << \" \" << release_time << \" \" 
<< total_time << std::endl;\n}\nfloat get_distance(\n const spectrum_image &ref,\n const window_list_t &window,\n const std::vector< int16_t > &audio\n) {\n const float a = ref.get_a();\n const float b = ref.get_b();\n const auto converted = fftcomp( ref.get_pixels(), ref.get_height(), window, audio, ref.get_resolution(), a, b, ref.get_width() );\n float delay, attack, release;\n std::tie( delay, attack, release ) = segment_envelope( converted.second, ref.get_a(), ref.get_b() );\n double delay_time = ( a * delay * delay + b * delay ) * tinyfm3::delta;\n double attack_time = ( a * attack * attack + b * attack ) * tinyfm3::delta;\n double release_time = ( a * release * release + b * release ) * tinyfm3::delta;\n double delay_distance = std::abs( ref.get_delay_time() - delay_time );\n double attack_distance = std::abs( ref.get_attack_time() - attack_time );\n double release_distance = std::abs( ref.get_release_time() - release_time );\n// std::cout << delay_distance << \" \" << attack_distance << \" \" << release_distance << std::endl;\n return double( converted.first )/ref.get_width()/ref.get_height() * ( delay_distance * 40.f + attack_distance * 40.f + release_distance * 40.f + 1.f );\n}\n\n", "meta": {"hexsha": "141bb7e7b1f4b9088fdd79a817838d04a30eab7c", "size": 3141, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/spectrum_image.cpp", "max_stars_repo_name": "Fadis/genetic_fm", "max_stars_repo_head_hexsha": "415158b02e2c0dad8fafc81b5762b8889e493f10", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 34.0, "max_stars_repo_stars_event_min_datetime": "2016-10-08T08:55:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-17T03:19:54.000Z", "max_issues_repo_path": "src/spectrum_image.cpp", "max_issues_repo_name": "Fadis/genetic_fm", "max_issues_repo_head_hexsha": "415158b02e2c0dad8fafc81b5762b8889e493f10", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, 
"max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/spectrum_image.cpp", "max_forks_repo_name": "Fadis/genetic_fm", "max_forks_repo_head_hexsha": "415158b02e2c0dad8fafc81b5762b8889e493f10", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2018-06-21T00:06:30.000Z", "max_forks_repo_forks_event_max_datetime": "2018-06-21T00:06:30.000Z", "avg_line_length": 46.1911764706, "max_line_length": 153, "alphanum_fraction": 0.6711238459, "num_tokens": 842, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8824278664544911, "lm_q2_score": 0.5660185351961015, "lm_q1q2_score": 0.4994705283867921}} {"text": "#ifndef CHARGE_EV_PHEM_HPP\n#define CHARGE_EV_PHEM_HPP\n\n#include \"common/constant_function.hpp\"\n#include \"common/serialization.hpp\"\n\n#include \"ev/consumption_model.hpp\"\n\n#include \n\n#include \n\nnamespace charge::ev {\nnamespace detail {\n\nenum class PHEMVehicleType : std::int32_t {\n PEUGEOT_ION,\n EV_NO_AUX,\n EV_SPRING,\n EV_SUMMER,\n EV_WINTER,\n INVALID = -1\n};\n\nclass PHEMConsumptionModel : public ConsumptionModel {\n public:\n PHEMConsumptionModel(double speed_parameter, double slope_parameter, double const_parameter)\n : speed_parameter(speed_parameter), slope_parameter(slope_parameter),\n const_parameter(const_parameter) {}\n\n LimitedTradeoffFunction tradeoff_function(const double length, const double slope,\n const double min_speed,\n const double max_speed) const override final {\n assert(min_speed <= max_speed);\n assert(min_speed > 0);\n assert(max_speed < 300);\n\n const double limited_slope = std::max(-10.0, slope*100);\n // Original code has a scaling by 1000, this causes numeric problems.\n // const double a = 1000.0 * length * speed_parameter * (3.6 * 3.6 * length * length);\n // const double b = 0.0;\n // const double c = 1000.0 * length * (slope_parameter * limited_slope + const_parameter);\n const double a = length * speed_parameter 
* (3.6 * 3.6 * length * length);\n const double b = 0.0;\n const double c = length * (slope_parameter * limited_slope + const_parameter);\n const double min_duration = length * 3.6 / max_speed;\n const double max_duration = length * 3.6 / min_speed;\n assert(min_duration <= max_duration);\n assert(min_duration >= 0);\n\n assert(std::isfinite(a));\n assert(std::isfinite(b));\n assert(std::isfinite(c));\n assert(std::isfinite(min_duration));\n assert(std::isfinite(max_duration));\n\n LimitedTradeoffFunction tradeoff;\n if (a > 0)\n tradeoff = {min_duration, max_duration, common::HyperbolicFunction{a, b, c}};\n else\n tradeoff = {min_duration, max_duration, common::ConstantFunction{c}};\n\n // Only return a tradeoff function of the min and max speed are different\n if (max_speed - min_speed <= 1 || max_duration - min_duration < 1) {\n auto constant_consumption = tradeoff(tradeoff.min_x);\n return LimitedTradeoffFunction{min_duration, min_duration, common::ConstantFunction{constant_consumption}};\n } else {\n return tradeoff;\n }\n }\n\n private:\n double speed_parameter;\n double slope_parameter;\n double const_parameter;\n};\n\nstd::unique_ptr make_consumption_model(PHEMVehicleType type) {\n double speed_parameter, slope_parameter, const_parameter;\n\n switch (type) {\n case PHEMVehicleType::PEUGEOT_ION:\n speed_parameter = 0.00001084948;\n slope_parameter = 0.02863728;\n const_parameter = 0.08052179;\n break;\n case PHEMVehicleType::EV_NO_AUX:\n speed_parameter = 0.00001129197;\n slope_parameter = 0.05127720;\n const_parameter = 0.1247448;\n break;\n case PHEMVehicleType::EV_SPRING:\n speed_parameter = 0.00001105337;\n slope_parameter = 0.05135052;\n const_parameter = 0.1287824;\n break;\n case PHEMVehicleType::EV_SUMMER:\n speed_parameter = 0.000009192936;\n slope_parameter = 0.05194433;\n const_parameter = 0.1605070;\n break;\n case PHEMVehicleType::EV_WINTER:\n speed_parameter = 0.000004730422;\n slope_parameter = 0.05345339;\n const_parameter = 0.2379125;\n 
break;\n default:\n throw std::runtime_error(\"Unknown vehicle type\");\n }\n\n return std::make_unique(speed_parameter, slope_parameter,\n const_parameter);\n}\n}\n\nstd::unordered_map>\nmake_phem_consumption_models() {\n std::unordered_map> models;\n\n models[\"Peugeot Ion\"] =\n detail::make_consumption_model(detail::PHEMVehicleType::PEUGEOT_ION);\n models[\"EV No Aux\"] =\n detail::make_consumption_model(detail::PHEMVehicleType::EV_NO_AUX);\n models[\"EV Spring\"] =\n detail::make_consumption_model(detail::PHEMVehicleType::EV_SPRING);\n models[\"EV Summer\"] =\n detail::make_consumption_model(detail::PHEMVehicleType::EV_SUMMER);\n models[\"EV Winter\"] =\n detail::make_consumption_model(detail::PHEMVehicleType::EV_WINTER);\n\n return models;\n}\n}\n\n#endif\n", "meta": {"hexsha": "0cb764147902ec6b29ef11784fe8de8d63c4926b", "size": 4748, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/ev/phem.hpp", "max_stars_repo_name": "TheMarex/charge", "max_stars_repo_head_hexsha": "85e35f7a6c8b8c161ecd851124d1363d5a450573", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 13.0, "max_stars_repo_stars_event_min_datetime": "2018-03-09T14:37:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-27T06:56:35.000Z", "max_issues_repo_path": "include/ev/phem.hpp", "max_issues_repo_name": "AlexBlazee/charge", "max_issues_repo_head_hexsha": "85e35f7a6c8b8c161ecd851124d1363d5a450573", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/ev/phem.hpp", "max_forks_repo_name": "AlexBlazee/charge", "max_forks_repo_head_hexsha": "85e35f7a6c8b8c161ecd851124d1363d5a450573", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2018-04-14T02:27:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-13T23:30:44.000Z", "avg_line_length": 
34.9117647059, "max_line_length": 119, "alphanum_fraction": 0.6575400168, "num_tokens": 1144, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8705972684083609, "lm_q2_score": 0.5736784074525098, "lm_q1q2_score": 0.4994428544730137}} {"text": "// Copyright (c) 2012-2017 VideoStitch SAS\n// Copyright (c) 2018 stitchEm\n\n#include \"eigengeometry.hpp\"\n\n#include \n\nnamespace VideoStitch {\nnamespace Calibration {\n\n#ifndef __clang_analyzer__ // VSA-7040\nvoid rotationFromEulerZXY(Eigen::Matrix3d& R, double yaw, double pitch, double roll) {\n Eigen::AngleAxisd X(-pitch, Eigen::Vector3d::UnitX());\n Eigen::AngleAxisd Y(-yaw, Eigen::Vector3d::UnitY());\n Eigen::AngleAxisd Z(-roll, Eigen::Vector3d::UnitZ());\n\n R = Z.matrix() * X.matrix() * Y.matrix();\n}\n#endif // __clang_analyzer__\n\nvoid EulerZXYFromRotation(Eigen::Vector3d& vr, const Eigen::Matrix3d& R) {\n vr(0) = atan2(R(2, 0), R(2, 2));\n vr(1) = -asin(R(2, 1));\n vr(2) = atan2(R(0, 1), R(1, 1));\n}\n\n} // namespace Calibration\n} // namespace VideoStitch\n", "meta": {"hexsha": "e042878b7235d35cd96c96840d4b77a8d70185fd", "size": 784, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "lib/src/calibration/eigengeometry.cpp", "max_stars_repo_name": "tlalexander/stitchEm", "max_stars_repo_head_hexsha": "cdff821ad2c500703e6cb237ec61139fce7bf11c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 182.0, "max_stars_repo_stars_event_min_datetime": "2019-04-19T12:38:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T16:48:20.000Z", "max_issues_repo_path": "lib/src/calibration/eigengeometry.cpp", "max_issues_repo_name": "doymcc/stitchEm", "max_issues_repo_head_hexsha": "20693a55fa522d7a196b92635e7a82df9917c2e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 107.0, "max_issues_repo_issues_event_min_datetime": "2019-04-23T10:49:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-02T18:12:28.000Z", "max_forks_repo_path": 
"lib/src/calibration/eigengeometry.cpp", "max_forks_repo_name": "doymcc/stitchEm", "max_forks_repo_head_hexsha": "20693a55fa522d7a196b92635e7a82df9917c2e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 59.0, "max_forks_repo_forks_event_min_datetime": "2019-06-04T11:27:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T23:49:49.000Z", "avg_line_length": 27.0344827586, "max_line_length": 86, "alphanum_fraction": 0.6772959184, "num_tokens": 268, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8479677737461007, "lm_q2_score": 0.588889130767832, "lm_q1q2_score": 0.4993590052004749}} {"text": "#include \"comparator.h\"\n#include \"tools.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace he_cmp;\n\n// polynomial coefficients of bivariate polynomial decomposition as in Theorem 2 for different plaintext moduli\nmap>> fcoefs {\n {11,\n \t{\n \t\t{-4, 4, 2, -2, -2, 2, -4, -1},\n \t\t{5, -5, 2, -2, -3, 4},\n \t\t{-4, 4, 4, 4},\n \t\t{2, 5},\n \t\t{-1}\n \t}\n },\n {13,\n \t{\n \t\t{-5, 5, 5, -5, -4, 4, 6, -6, -5, -1},\n \t\t{3, -3, -5, 5, 4, -4, -4, 5},\n \t\t{4, -4, -6, 6, -5, 1},\n \t\t{3, -3, 6, 1},\n \t\t{2, 6},\n \t\t{1}\n \t}\n },\n {17, \n {\n \t{-4, 4, -8, 8, 3, -3, 7, -7, -8, 8, -4, 4, -7, -1},\n \t{4, -4, -1, 1, 4, -4, -4, 4, -7, 7, -6, 7},\n \t{-6, 6, -4, 4, -7, 7, 7, -7, 3, 8},\n \t{-4, 4, 3, -3, -2, 2, 3, 4},\n \t{2, -2, -4, 4, 2, 2},\n \t{-4, 4, 7, 8},\n \t{-3, 5},\n \t{1}\n }\n },\n {19,\n \t{\n \t\t{4, -4, 6, -6, 8, -8, -1, 1, 6, -6, 1, -1, 8, -8, -8, -1},\n\t\t{-2, 2, -6, 6, 2, -2, -7, 7, -1, 1, -5, 5, -7, 8},\n \t{2, -2, -9, 9, 2, -2, -1, 1, 7, -7, 9, 3},\n \t{9, -9, -7, 7, 6, -6, 5, -5, -8, -4},\n \t{7, -7, -9, 9, 4, -4, -7, 9},\n \t{5, -5, 3, -3, -9, -1},\n \t{7, -7, 5, -9},\n \t{-3, -4},\n \t{-1}\n \t}\n },\n {23,\n \t{\n \t\t{8, -8, -1, 1, -11, 11, 7, -7, -3, 3, -9, 9, 1, -1, 7, -7, -6, 6, -10, -1},\n\t\t{9, -9, 6, -6, -2, 2, -8, 8, -1, 1, 8, -8, -11, 11, 
1, -1, -9, 10},\n\t\t{-10, 10, 4, -4, 3, -3, 0, 0, 7, -7, 2, -2, 7, -7, 2, -11},\n\t\t{5, -5, 9, -9, 1, -1, 0, 0, -8, 8, 2, -2, 10, -3},\n\t\t{-10, 10, -6, 6, -7, 7, -2, 2, 2, -2, -9, 7},\n\t\t{6, -6, -3, 3, -2, 2, -11, 11, 5, -8},\n\t\t{10, -10, -10, 10, 6, -6, -2, -2},\n\t\t{-6, 6, 5, -5, 10, -8},\n\t\t{10, -10, -2, -5},\n\t\t{4, -1},\n\t\t{-1}\n \t}\n },\n {29,\n \t{\n \t\t{2,-2,-6,6,-2,2,14,-14,5,-5,-4,4,-10,10,-4,4,-1,1,0,0,-9,9,-8,8,-13,-1},\n\t\t{8,-8,0,0,10,-10,-12,12,4,-4,11,-11,-2,2,3,-3,-6,6,-3,3,-14,14,-12,13},\n\t\t{4,-4,9,-9,4,-4,12,-12,-11,11,0,0,-11,11,-14,14,-5,5,7,-7,1,-13},\n\t\t{13,-13,0,0,10,-10,-13,13,5,-5,-7,7,-2,2,7,-7,12,-12,-6,13},\n\t\t{-9,9,-14,14,3,-3,-7,7,-13,13,-1,1,7,-7,5,-5,-6,-2},\n\t\t{9,-9,7,-7,-13,13,-3,3,-8,8,-10,10,12,-12,-2,10},\n\t\t{-6,6,8,-8,3,-3,12,-12,-14,14,14,-14,6,-9},\n\t\t{-6,6,7,-7,-5,5,-1,1,-10,10,-14,4},\n\t\t{3,-3,2,-2,-9,9,10,-10,12,12},\n\t\t{-1,1,-6,6,-9,9,-2,-10},\n\t\t{-9,9,-12,12,-14,1},\n\t\t{-1,1,-4,-13},\n\t\t{-5,-6},\n\t\t{1}\n \t}\n },\n {31,\n \t{\n \t\t{-14,14,9,-9,-5,5,4,-4,-9,9,-1,1,-15,15,-11,11,9,-9,0,0,-1,1,13,-13,12,-12,-14,-1},\n\t\t{8,-8,5,-5,9,-9,-1,1,-10,10,12,-12,15,-15,10,-10,-2,2,8,-8,9,-9,-10,10,-13,14},\n\t\t{-14,14,-15,15,-13,13,-10,10,2,-2,13,-13,0,0,-5,5,0,0,-12,12,7,-7,11,7},\n\t\t{-9,9,-6,6,6,-6,-8,8,-11,11,-2,2,-13,13,5,-5,14,-14,-4,4,8,-1},\n\t\t{-13,13,12,-12,-6,6,10,-10,-13,13,10,-10,-1,1,8,-8,-11,11,9,12},\n\t\t{-8,8,9,-9,-4,4,-9,9,-13,13,2,-2,5,-5,-15,15,-12,-15},\n\t\t{-14,14,3,-3,-10,10,-2,2,-4,4,10,-10,9,-9,-12,-6},\n\t\t{-4,4,6,-6,-2,2,-7,7,-1,1,9,-9,12,-10},\n\t\t{-11,11,10,-10,-9,9,-12,12,8,-8,-7,-11},\n\t\t{9,-9,0,0,12,-12,9,-9,3,-6},\n\t\t{-1,1,-9,9,-3,3,2,3},\n\t\t{-14,14,6,-6,15,-14},\n\t\t{-1,1,-12,-11},\n\t\t{-5,9},\n\t\t{-1}\n \t}\n }\n};\n\nDoubleCRT Comparator::create_shift_mask(double& size, long shift)\n{\n\tcout << \"Mask for shift \" << shift << \" is being created\" << endl;\n\t// get EncryptedArray\n \tconst EncryptedArray& ea = m_context.getEA();\n\n 
\t//extract slots\n\tlong nSlots = ea.size();\n\n\t//number of batches in one slot\n\tlong batch_size = nSlots / m_expansionLen;\n\n\t// create a mask vector\n\tvector mask_vec(nSlots,1);\n\n\t//starting position of all batches\n\tlong start = 0;\n\n\t// set zeros in the unused slots\n\tlong nEndZeros = nSlots - batch_size * m_expansionLen;\n\tfor (int i = 1; i <= nEndZeros; i++)\n\t{\n\tlong indx = (start + nSlots - i) % nSlots;\n\tmask_vec[indx] = 0;\n\t}\n\n\t// masking values rotated outside their batches\n\tfor (long i = 0; i < batch_size; i++)\n\t{\n\tif (shift < 0)\n\t{\n\t for (long j = 0; j < -shift; j++)\n\t {\n\t long indx = (start + (i + 1) * m_expansionLen - j - 1) % nSlots;\n\t mask_vec[indx] = 0;\n\t }\n\t}\n\telse if (shift > 0)\n\t{\n\t for (long j = 0; j < shift; j++)\n\t {\n\t long indx = (start + i * m_expansionLen + j) % nSlots;\n\t mask_vec[indx] = 0;\n\t }\n\t}\n\t}\n\tZZX mask_zzx;\n\tea.encode(mask_zzx, mask_vec);\n\n\tsize = conv(embeddingLargestCoeff(mask_zzx, m_context.getZMStar()));\n\n\tDoubleCRT mask_crt = DoubleCRT(mask_zzx, m_context, m_context.allPrimes());\n\treturn mask_crt;\n}\n\nvoid Comparator::create_all_shift_masks()\n{\n\tlong shift = 1;\n\twhile (shift < m_expansionLen)\n\t{\n\t\tdouble size;\n\t DoubleCRT mask_ptxt = create_shift_mask(size, -shift);\n\t m_mulMasks.push_back(mask_ptxt);\n\t m_mulMasksSize.push_back(size);\n\n\t shift <<=1;\n\t}\n\tcout << \"All masks are created\" << endl;\n}\n\nvoid Comparator::compute_poly_params()\n{\n\t// get p\n\tZZ p = ZZ(m_context.getP());\n\tlong p_long = conv(p);\n\n\t// hardcoded babysteps sizes\n\tmap bs_nums\n\t{\n\t\t{5, 1},\n\t\t{7, 2}, // 4\n\t\t{11, 3}, // 3 (6), 1..4\n \t\t{13, 3}, // 3 (6), 1..5\n \t\t{17, 4}, // 4 (7), 1..5\n \t\t{19, 3}, // 3 (8), 1..4\n \t\t{23, 5}, // 5 (9), 3..6\n \t\t{29, 5}, // 5 (10), 1..6\n \t\t{31, 5}, // 5 (10), 4..6\n \t\t{37, 5}, // 5 (12)\n \t\t{47, 5}, // 5 (13), 2..11 \n \t\t{61, 6}, // 6 (14), 4..8 \n \t\t{67, 5}, // 5 (15), 4..8\n 
\t\t{71, 4}, // 4 (15), 3..7\n \t\t{101, 7}, // 7 (16), 4..8\n \t\t{109, 7}, // 7 (19)\n \t\t{131, 8}, // 8 (19), 4..11\n \t\t{167, 10}, // 10 (21), 8..12\n \t\t{173, 10}, // 10 (21), 8..12\n \t\t{271, 9}, // 9 (26), 9..10\n \t\t{401, 12}, // 12 (28), 9..14\n \t\t{659, 11}\t// 11 (41), 11..12\n \t};\n\n \tm_bs_num_comp = -1;\n \tm_bs_num_min = -1;\n \tif(bs_nums.count(p_long) > 0)\n \t{\n \t\tm_bs_num_comp = bs_nums[p_long];\n \t\tm_bs_num_min = bs_nums[p_long];\n \t}\n\n \t// if p > 3, d = (p-3)/2\n \tlong d_comp = deg(m_univar_less_poly);\n \t// if p > 3, d = (p-1)/2\n \tlong d_min = deg(m_univar_min_max_poly);\n\n \t// How many baby steps: set sqrt(d/2), rounded up/down to a power of two\n\n\t// FIXME: There may be some room for optimization here: it may be possible to choose this number as something other than a power of two and still maintain optimal depth, in principle we can try all possible values of m_babystep_num between two consecutive powers of two and choose the one that gives the least number of multiplies, conditioned on minimum depth.\n\n \tif (m_bs_num_comp <= 0) \n\t{\n\t\tlong kk = static_cast(sqrt(d_comp/2.0)); //sqrt(d/2)\n\t\tm_bs_num_comp = 1L << NextPowerOfTwo(kk);\n\n \t// heuristic: if #baby_steps >> kk then use a smaler power of two\n \tif ((m_bs_num_comp==16 && d_comp>167) || (m_bs_num_comp>16 && m_bs_num_comp>(1.44*kk)))\n \t\tm_bs_num_comp /= 2;\n \t}\n \tif (m_bs_num_min <= 0) \n\t{\n\t\tlong kk = static_cast(sqrt(d_min/2.0)); //sqrt(d/2)\n\t\tm_bs_num_min = 1L << NextPowerOfTwo(kk);\n\n \t// heuristic: if #baby_steps >> kk then use a smaler power of two\n \tif ((m_bs_num_min==16 && d_min>167) || (m_bs_num_min>16 && m_bs_num_min>(1.44*kk)))\n \t\tm_bs_num_min /= 2;\n \t}\n\n\tif(m_verbose)\n\t{\n\t\tcout << \"Number of baby steps for comparison: \" << m_bs_num_comp << endl;\n\t\tcout << \"Number of baby steps for min/max: \" << m_bs_num_min << endl;\n\t}\n\n\t// #giant_steps = ceil(d/#baby_steps), d >= #giant_steps * 
#baby_steps\n\tm_gs_num_comp = divc(d_comp,m_bs_num_comp);\n\tm_gs_num_min = divc(d_min,m_bs_num_min);\n\n\tif(m_verbose)\n\t{\n\t\tcout << \"Number of giant steps for comparison: \" << m_bs_num_comp << endl;\n\t\tcout << \"Number of giant steps for min/max: \" << m_bs_num_min << endl;\n\t} \n\n\t// If #giant_steps is not a power of two, ensure that poly is monic and that\n\t// its degree is divisible by #baby_steps, then call the recursive procedure\n\n\t// top coefficient is equal to (p^2 - 1)/8 mod p\n\t// its inverse is equal to -8 mod p\n\tm_top_coef_comp = LeadCoeff(m_univar_less_poly);\n\tm_top_coef_min = LeadCoeff(m_univar_min_max_poly);\n\tZZ topInv_comp = ZZ(-8) % p; // the inverse mod p of the top coefficient of poly (if any)\n\tZZ topInv_min = ZZ(-8) % p; // the inverse mod p of the top coefficient of poly (if any)\n\tbool divisible_comp = (m_gs_num_comp * m_bs_num_comp == d_comp); // is the degree divisible by #baby_steps?\n\tbool divisible_min = (m_gs_num_min * m_bs_num_min == d_min); // is the degree divisible by #baby_steps?\n\n\t// FIXME: There may be some room for optimization below: instead of\n\t// adding a term X^{n*k} we can add X^{n'*k} for some n'>n, so long\n\t// as n' is smaller than the next power of two. 
We could save a few\n\t// multiplications since giantStep[n'] may be easier to compute than\n\t// giantStep[n] when n' has fewer 1's than n in its binary expansion.\n\n\tm_extra_coef_comp = ZZ::zero(); // extra!=0 denotes an added term extra*X^{#giant_steps * #baby_steps}\n\tm_extra_coef_min = ZZ::zero(); // extra!=0 denotes an added term extra*X^{#giant_steps * #baby_steps}\n\n\tif (m_gs_num_comp != (1L << NextPowerOfTwo(m_gs_num_comp)))\n\t{\n\t\tif (!divisible_comp) \n\t\t{ // need to add a term\n\t \tm_top_coef_comp = NTL::to_ZZ(1); // new top coefficient is one\n\t \ttopInv_comp = m_top_coef_comp; // also the new inverse is one\n\t \t// set extra = 1 - current-coeff-of-X^{n*k}\n\t \tm_extra_coef_comp = SubMod(m_top_coef_comp, coeff(m_univar_less_poly, m_gs_num_comp * m_bs_num_comp), p);\n\t \tSetCoeff(m_univar_less_poly, m_gs_num_comp * m_bs_num_comp); // set the top coefficient of X^{n*k} to one\n\t\t}\n\n\t\tif (!IsOne(m_top_coef_comp)) \n\t\t{\n\t \tm_univar_less_poly *= topInv_comp; // Multiply by topInv to make into a monic polynomial\n\t \tfor (long i = 0; i <= m_gs_num_comp * m_bs_num_comp; i++) rem(m_univar_less_poly[i], m_univar_less_poly[i], p);\n\t \tm_univar_less_poly.normalize();\n\t\t}\n\t}\n\n\t/*\n\tcout << \"Less-than poly: \";\n\tprintZZX(cout, m_univar_less_poly, conv(p));\n\tcout << endl;\n\t*/\n\n\tif (m_gs_num_min != (1L << NextPowerOfTwo(m_gs_num_min)))\n\t{\n\t\tif (!divisible_min) \n\t\t{ // need to add a term\n\t \tm_top_coef_min = NTL::to_ZZ(1); // new top coefficient is one\n\t \ttopInv_min = m_top_coef_min; // also the new inverse is one\n\t \t// set extra = 1 - current-coeff-of-X^{n*k}\n\t \tm_extra_coef_min = SubMod(m_top_coef_min, coeff(m_univar_min_max_poly, m_gs_num_min * m_bs_num_min), p);\n\t \tSetCoeff(m_univar_min_max_poly, m_gs_num_min * m_bs_num_min); // set the top coefficient of X^{n*k} to one\n\t\t}\n\n\t\tif (!IsOne(m_top_coef_min)) \n\t\t{\n\t \tm_univar_min_max_poly *= topInv_min; // Multiply by topInv to make into 
a monic polynomial\n\t \tfor (long i = 0; i <= m_gs_num_min * m_bs_num_min; i++) rem(m_univar_min_max_poly[i], m_univar_min_max_poly[i], p);\n\t \tm_univar_min_max_poly.normalize();\n\t\t}\n\t}\n\n\t/*\n\tcout << \"Min-max poly: \";\n\tprintZZX(cout, m_univar_min_max_poly, conv(p));\n\tcout << endl;\n\t*/\n\n\tlong top_deg = conv(p-1) >> 1;\n\tm_baby_index = top_deg % m_bs_num_comp;\n\tm_giant_index = top_deg / m_bs_num_comp;\n\tif(m_baby_index == 0)\n\t{\n\t\tm_baby_index = m_bs_num_comp;\n\t\tm_giant_index -= 1;\n\t}\n}\n\nvoid Comparator::create_poly()\n{\n\tcout << \"Creating comparison polynomial\" << endl;\n\t// get p\n\tunsigned long p = m_context.getP();;\n\n\tif(m_type == UNI)\n\t{\n\t\t// polynomial coefficient\n\t\tZZ_p coef;\n\t\tcoef.init(ZZ(p));\n\n\t\t// field element\n\t\tZZ_p field_elem;\n\t\tfield_elem.init(ZZ(p));\n\n\t\t// initialization of the univariate comparison polynomial\n\t\tm_univar_less_poly = ZZX(INIT_MONO, 0, 0);\n\n\t\t// loop over all odd coefficient indices\n\t\tfor (long indx = 1; indx < p - 1; indx+=2)\n\t\t{ \n\t\t\t// coefficient f_i = sum_a a^{p-1-indx} where a runs over [1,...,(p-1)/2]\n\t\t\tcoef = 1;\n\t\t\tfor(long a = 2; a <= ((p-1) >> 1); a++)\n\t\t\t{\n\t\t\t field_elem = a;\n\t\t\t coef += power(field_elem, p - 1 - indx);\n\t\t\t}\n\n\t\t\tm_univar_less_poly += ZZX(INIT_MONO, (indx-1) >> 1, rep(coef));\n\t\t}\n\n\t\t/*\n\t\tcout << \"Less-than poly: \";\n\t\tprintZZX(cout, m_univar_less_poly, p);\n\t\tcout << endl;\n\t\t*/\n\n\t\tm_univar_min_max_poly = m_univar_less_poly * ZZX(INIT_MONO, 1, 1);\n\n\t\t/*\n\t\tcout << \"Min-max poly: \";\n\t\tprintZZX(cout, m_univar_min_max_poly, p);\n\t\tcout << endl;\n\t\t*/\n\n\t\tcompute_poly_params();\n\t}\n\telse if (m_type == TAN)\n\t{\n\t\t// computing the coefficients of the bivariate polynomial of Tan et al.\n\t\tm_bivar_less_coefs.SetDims(p,p);\n\n\t\t// y^{p-1}\n\t\tm_bivar_less_coefs[0][p-1] = ZZ(1);\n\n\t\t// (p+1)/2 * x^{(p-1)/2} * 
y^{(p-1)/2}\n\t\tm_bivar_less_coefs[(p-1) >> 1][(p-1) >> 1] = ZZ((p+1) >> 1);\n\n\t\t// iterator\n\t\tZZ_p field_elem;\n\t\tfield_elem.init(ZZ(p));\n\n\t\t// inner sum\n\t\tZZ_p inner_sum;\n\t\tinner_sum.init(ZZ(p));\n\t\t\n\t\t// outer sum\n\t\tZZ_p outer_sum;\n\t\touter_sum.init(ZZ(p));\n\n\t\tfor (long i = 1; i < p; i++)\n\t\t{\n\t\t\tfor (long j = 1; j < p; j++)\n\t\t\t{\n\t\t\t\t// x^i * y^i have the zero coefficient except for i = (p-1)/2\n\t\t\t\tif (i == j)\n\t\t\t\t\tcontinue;\n\n\t\t\t\touter_sum = 0;\n\t\t\t\t// sum_{a=1}^{p-1} a^{p-1-i} sum_{b = a+1}^{p-1} b^{p-1-j} \n\t\t\t\tfor (long a = 1; a < p; a++)\n\t\t\t\t{\n\t\t\t\t\tinner_sum = 0;\n\t\t\t\t\t// sum_{b = a+1}^{p-1} b^{p-1-j} \n\t\t\t\t\tfor (long b = a+1; b < p; b++)\n\t\t\t\t\t{\n\t\t\t\t\t\t// b^{p-1-j}\n\t\t\t\t\t\tfield_elem = b;\n\t\t\t\t\t\tfield_elem = power(field_elem, p - 1 - j);\n\n\t\t\t\t\t\tinner_sum += field_elem;\n\t\t\t\t\t}\n\t\t\t\t\t// a^{p-1-i}\n\t\t\t\t\tfield_elem = a;\n\t\t\t\t\tfield_elem = power(field_elem, p - 1 - i);\n\n\t\t\t\t\tinner_sum *= field_elem;\n\t\t\t\t\touter_sum += inner_sum;\n\t\t\t\t}\n\t\t\t\tm_bivar_less_coefs[i][j] = rep(outer_sum);\n\t\t\t}\n\t\t}\n\n\t\tcout << \"Bivariate coefficients\" << endl << m_bivar_less_coefs << endl;\n\n\t\tif (m_verbose)\n\t\t{\n\t\t\tcout << \"Comparison polynomial: \" << endl;\n\t\t\tprintZZX(cout, m_univar_less_poly, (p-1)>>1);\n\t\t\tcout << endl;\n\t\t}\n\t}\n\n\tcout << \"Comparison polynomial is created\" << endl;\n}\n\nvoid Comparator::find_prim_root(ZZ_pE& root) const\n{\n\tZZ qm1 = root.cardinality() - 1;\n\n\tcout << \"Slot order: \" << qm1 << endl;\n\tcout << \"Slot poly: \" << root.modulus() << endl;\n\n\tvector facts;\n\tfactorize(facts, qm1); // factorization of slot order\n\n\tNTL::set(root);\n\n\tfor (unsigned long i = 0; i < facts.size(); i++) \n\t{\n\t\tZZ p = facts[i];\n\t\tZZ pp = p;\n\t\tZZ ee = qm1 / p;\n\t\twhile (ee % p == 0) \n\t\t{\n\t \t\tee = ee / p;\n\t \t\tpp = pp * p;\n\t\t}\n\t\t// so now 
// Find a primitive root of the multiplicative group of the slot field,
// i.e. an element of order q-1 where q is the slot-field cardinality.
// For each prime factor p of q-1, a random element of order p^k (the full
// power of p dividing q-1) is found and multiplied into `root`; the product
// then has order q-1. A final self-check verifies the order.
void Comparator::find_prim_root(ZZ_pE& root) const
{
	// group order: cardinality of the slot field minus one
	ZZ qm1 = root.cardinality() - 1;

	cout << "Slot order: " << qm1 << endl;
	cout << "Slot poly: " << root.modulus() << endl;

	vector<ZZ> facts;
	factorize(facts, qm1); // factorization of slot order

	// start from the identity and accumulate factors of the right orders
	NTL::set(root);

	for (unsigned long i = 0; i < facts.size(); i++)
	{
		ZZ p = facts[i];
		ZZ pp = p;
		ZZ ee = qm1 / p;
		// strip all remaining factors of p from ee, accumulating them in pp
		while (ee % p == 0)
		{
			ee = ee / p;
			pp = pp * p;
		}
		// so now we have e = pp * ee, where pp is
		// the power of p that divides e.
		// Our goal is to find an element of order pp

		NTL::PrimeSeq s;
		ZZ_pE q = root;
		ZZ_pE qq = root;
		ZZ_pE qq1 = root;
		long iter = 0;
		// rejection sampling: a random q works iff q^{(q-1)/p} != 1
		do
		{
			iter++;
			if (iter > 1000000)
				throw RuntimeError("FindPrimitiveRoot: possible infinite loop?");
			random(q);
			NTL::conv(qq, q);
			power(qq1, qq, qm1 / p);
		}
		while (IsOne(qq1));
		power(qq1, qq, qm1 / pp); // qq1 has order pp

		mul(root, root, qq1);
	}

	// independent check that we have an e-th root of unity
	{
		ZZ_pE s;

		power(s, root, qm1);
		if (!IsOne(s))
			throw RuntimeError("FindPrimitiveRoot: internal error (1)");

		// check that s^{e/p} != 1 for any prime divisor p of e
		for (unsigned long i = 0; i < facts.size(); i++)
		{
			ZZ e2 = qm1 / facts[i];
			power(s, root, e2); // s = root^{e/p}
			if (IsOne(s))
				throw RuntimeError("FindPrimitiveRoot: internal error (2)");
		}
	}
}
poly mod p \" << def_poly_p << endl;\n\t\n\t// build the trace matrix\n\tmat_ZZ_pE trace_matrix;\n\tmat_ZZ_pE inv_trace_matrix;\n\ttrace_matrix.SetDims(d, d);\n\t\n\tZZ_pE prim_elem;\n\tprim_elem.init(def_poly_p);\n\tprim_elem = conv(ZZ_pX(INIT_MONO, 1, 1));\n\n\t//find_prim_root(prim_elem);\n\t//cout << \"Primitive element \" << prim_elem << endl;\n\n\tZZ_pE coef;\n\tcoef.init(def_poly_p);\n\n\tfor (long iRow = 0; iRow < d; iRow++)\n\t{\n\t\tfor (long iCol = 0; iCol < d; iCol++)\n\t\t{\n\t\t\t// x^(iRow * p^iCol)\n\t\t\tcoef = power(prim_elem, iRow * power_long(p, iCol));\n\t\t\ttrace_matrix[iRow][iCol] = coef;\n\t\t}\n\t}\n\t//cout << \"Trace matrix: \" << trace_matrix << endl;\n\t//cout << \"Modulus: \" << trace_matrix[0][0].modulus() << endl; \n\n\tinv_trace_matrix = NTL::inv(trace_matrix);\n\t//cout << \"Inverse of trace matrix\" << inv_trace_matrix << endl;\n\t\t\n\n\t//cout << \"Extraction consts: \" << endl;\n\tfor (long iCoef = 0; iCoef < d; iCoef++)\n\t{\n\t\tvector tmp_crt_vec;\n\t\tvector size_vec;\n\n\t\tfor (long iFrob = 0; iFrob < d; iFrob++)\n\t\t{\n\t\t\tZZX tmp = conv(rep(inv_trace_matrix[iFrob][iCoef]));\n\n\t\t\t//cout << tmp << endl;\n\n\t\t\tvector vec_const(nslots, tmp);\n\t\t\tea.encode(tmp, vec_const);\n\n\t\t\tDoubleCRT tmp_crt(tmp, m_context, m_context.allPrimes());\n\n\t\t\tdouble const_size = conv(embeddingLargestCoeff(tmp, m_context.getZMStar()));\n\t\t\tsize_vec.push_back(const_size);\n\n\t\t\ttmp_crt_vec.push_back(tmp_crt);\n\t\t}\n\t\tm_extraction_const.push_back(tmp_crt_vec);\n\t\tm_extraction_const_size.push_back(size_vec);\n\t}\n}\n\nvoid Comparator::extract_mod_p(vector& mod_p_coefs, const Ctxt& ctxt_x) const\n{\n\tHELIB_NTIMER_START(Extraction);\n\tmod_p_coefs.clear();\n\n\tif (m_slotDeg == 1)\n\t{\n\t\tmod_p_coefs.push_back(ctxt_x);\n\t\treturn;\n\t}\n\n\tconst EncryptedArray& ea = m_context.getEA();\n\tlong nslots = ea.size();\n\n\t// get max slot degree\n\tlong d = m_context.getOrdP();\n\n\t// TODO: how to use key switching 
hoisting from CRYPTO'18?\n\tvector ctxt_frob(d-1, ctxt_x);\n\tfor(long iFrob = 1; iFrob < d; iFrob++)\n\t{\n\t\tctxt_frob[iFrob-1].frobeniusAutomorph(iFrob);\n\t} \n\n\tfor(long iCoef = 0; iCoef < m_slotDeg; iCoef++)\n\t{\n\t\t//cout << \"Extract coefficient \" << iCoef << endl;\n\t\tCtxt mod_p_ctxt = ctxt_x;\n\n\t\tmod_p_ctxt.multByConstant(m_extraction_const[iCoef][0], m_extraction_const_size[iCoef][0]);\n\n\t\tfor(long iFrob = 1; iFrob < d; iFrob++)\n\t\t{\n\t\t\tCtxt tmp = ctxt_frob[iFrob-1];\n\t\t\ttmp.multByConstant(m_extraction_const[iCoef][iFrob], m_extraction_const_size[iCoef][iFrob]);\n\t\t\tmod_p_ctxt += tmp; \n\t\t}\n\t\tmod_p_coefs.push_back(mod_p_ctxt);\n\t}\n\tHELIB_NTIMER_STOP(Extraction);\n}\n\nComparator::Comparator(const Context& context, CircuitType type, unsigned long d, unsigned long expansion_len, const SecKey& sk, bool verbose): m_context(context), m_type(type), m_slotDeg(d), m_expansionLen(expansion_len), m_sk(sk), m_pk(sk), m_verbose(verbose)\n{\n\t//determine the order of p in (Z/mZ)*\n\tunsigned long ord_p = context.getOrdP();\n\t//check that the extension degree divides the order of p\n\tif (ord_p < d != 0)\n\t{\n\t\tthrow invalid_argument(\"Field extension must be larger than the order of the plaintext modulus\\n\");\n\t}\n\n\tcreate_all_shift_masks();\n\tcreate_poly();\n\textraction_init();\n}\n\nconst DoubleCRT& Comparator::get_mask(double& size, long index) const\n{\n\tsize = m_mulMasksSize[index];\n\treturn m_mulMasks[index];\n}\n\nconst ZZX& Comparator::get_less_than_poly() const\n{\n\treturn m_univar_less_poly;\n}\n\nconst ZZX& Comparator::get_min_max_poly() const\n{\n\treturn m_univar_min_max_poly;\n}\n\nvoid Comparator::print_decrypted(const Ctxt& ctxt) const\n{\n\t// get EncryptedArray\n\tconst EncryptedArray& ea = m_context.getEA();\n\n\t// get order of p\n\tunsigned long ord_p = m_context.getOrdP();\n\n long nSlots = ea.size();\n vector decrypted(nSlots);\n ea.decrypt(ctxt, m_sk, decrypted);\n\n for(int i = 0; i < nSlots; 
// Debug helper: decrypt a ciphertext with the stored secret key and print
// each slot's polynomial (coefficients mod p, up to degree ord_p).
void Comparator::print_decrypted(const Ctxt& ctxt) const
{
	// get EncryptedArray
	const EncryptedArray& ea = m_context.getEA();

	// get order of p
	unsigned long ord_p = m_context.getOrdP();

    long nSlots = ea.size();
    vector<ZZX> decrypted(nSlots);
    ea.decrypt(ctxt, m_sk, decrypted);

    for(int i = 0; i < nSlots; i++)
    {
    	printZZX(cout, decrypted[i], ord_p);
    	cout << endl;
    }
}

// Rotate the slots of `ctxt` by `shift` and zero out the slots that were
// rotated across a batch boundary, using the precomputed mask for this
// shift amount. NOTE(review): the mask index is derived from -shift, so
// only negative (left) shifts appear to be supported here — confirm
// against callers (`start` is currently unused).
void Comparator::batch_shift(Ctxt& ctxt, long start, long shift) const
{
	HELIB_NTIMER_START(BatchShift);
	// get EncryptedArray
	const EncryptedArray& ea = m_context.getEA();

	// if shift is zero, do nothing
	if(shift == 0)
		return;

	// left cyclic rotation
	ea.rotate(ctxt, shift);

	// masking elements shifted out of batch
	long index = static_cast<long>(intlog(2, -shift)); // mask index for shift -2^index
	//cout << "Mask index: " << index << endl;
	double size;
	DoubleCRT mask = get_mask(size, index);
	ctxt.multByConstant(mask, size);
	HELIB_NTIMER_STOP(BatchShift);
}

// Same as batch_shift, but intended for multiplicative aggregation: the
// masked-out slots are set to 1 instead of 0 (add 1 everywhere, then
// subtract the mask), so they are neutral under multiplication.
void Comparator::batch_shift_for_mul(Ctxt& ctxt, long start, long shift) const
{
	HELIB_NTIMER_START(BatchShiftForMul);
	// get EncryptedArray
	const EncryptedArray& ea = m_context.getEA();

	// if shift is zero, do nothing
	if(shift == 0)
		return;
	// left cyclic rotation
	ea.rotate(ctxt, shift);

	long index = static_cast<long>(intlog(2, -shift)); // mask index for shift -2^index
	//cout << "Mask index: " << index << endl;
	double mask_size;
	DoubleCRT mask = get_mask(mask_size, index);
	ctxt.multByConstant(mask, mask_size);

	// add 1 to masked slots: ctxt + 1 - mask is 1 exactly where mask is 0
	ctxt.addConstant(ZZ(1));
	mask.Negate();
	ctxt.addConstant(mask, mask_size);

	HELIB_NTIMER_STOP(BatchShiftForMul);
}

// Sum each batch's slots into place by log(m_expansionLen) rotate-and-add
// steps; shift_direction selects the rotation sign.
void Comparator::shift_and_add(Ctxt& x, long start, long shift_direction) const
{
  HELIB_NTIMER_START(ShiftAdd);
  long shift_sign = -1;
  if(shift_direction)
    shift_sign = 1;

  long e = 1;

  // shift and add with doubling shifts: e = 1, 2, 4, ...
  while (e < m_expansionLen){
    Ctxt tmp = x;
    batch_shift(tmp, start, e * shift_sign);
    x += tmp;
    e <<=1;
  }
  HELIB_NTIMER_STOP(ShiftAdd);
}
batch_shift_for_mul(tmp, start, e * shift_sign);\n x.multiplyBy(tmp);\n e <<=1;\n }\n HELIB_NTIMER_STOP(ShiftMul);\n}\n\nvoid Comparator::mapTo01_subfield(Ctxt& ctxt, long pow) const\n{\n HELIB_NTIMER_START(MapTo01);\t\n // get EncryptedArray\n const EncryptedArray& ea = m_context.getEA();\n\n // get p\n long p = ctxt.getPtxtSpace();\n if (p != ea.getPAlgebra().getP()) // ptxt space is p^r for r>1\n throw helib::LogicError(\"mapTo01 not implemented for r>1\");\n\n if (p % pow != 0)\n \tthrow helib::LogicError(\"Exponent must divide p\");\n\n if (p > 2)\n ctxt.power((p - 1) / pow); // set y = x^{p-1}\n\n HELIB_NTIMER_STOP(MapTo01);\n}\n\nvoid Comparator::less_than_mod_2(Ctxt& ctxt_res, const Ctxt& ctxt_x, const Ctxt& ctxt_y) const\n{\n\t//Comp(x,y) = y(x+1)\n\tcout << \"Compute comparison polynomial\" << endl;\n\n\t// x + 1\n\tCtxt x_plus_1 = ctxt_x;\n\tx_plus_1.addConstant(ZZ(1));\n\n\t// y(x+1)\n\tctxt_res = ctxt_y;\n\tctxt_res.multiplyBy(x_plus_1);\n\n\tif(m_verbose)\n\t {\n\t print_decrypted(ctxt_res);\n\t cout << endl;\n\t }\n}\n\nvoid Comparator::less_than_mod_3(Ctxt& ctxt_res, const Ctxt& ctxt_x, const Ctxt& ctxt_y) const\n{\n\t//Comp(x,y) = -y(x-y)(x+1)\n\tcout << \"Compute comparison polynomial\" << endl;\n\n\t// x + 1\n\tCtxt x_plus_1 = ctxt_x;\n\tx_plus_1.addConstant(ZZ(1));\n\n\t// y(x - y)\n\tctxt_res = ctxt_x;\n\tctxt_res -= ctxt_y;\n\tctxt_res.multiplyBy(ctxt_y);\n\n\t// -y(x-y)(x+1)\n\tctxt_res.multiplyBy(x_plus_1);\n\tctxt_res.negate();\n\n\tif(m_verbose)\n\t {\n\t print_decrypted(ctxt_res);\n\t cout << endl;\n\t }\n}\n\nvoid Comparator::less_than_mod_5(Ctxt& ctxt_res, const Ctxt& ctxt_x, const Ctxt& ctxt_y) const\n{\n\t//Comp(x,y)=\u2212(x+1) y(x\u2212y) (x (x+1) \u2212 y(x\u2212y)).\n\tcout << \"Compute comparison polynomial\" << endl;\n\n\t// y(x - y)\n\tCtxt y_x_min_y = ctxt_x;\n\ty_x_min_y -= ctxt_y;\n\ty_x_min_y.multiplyBy(ctxt_y);\n\n\t// x + 1\n\tCtxt x_plus_1 = ctxt_x;\n\tx_plus_1.addConstant(ZZ(1));\n\n\t// x * (x+1)\n\tctxt_res = 
// Comparison circuit for p = 5:
// Comp(x,y) = -(x+1) * y(x-y) * (x(x+1) - y(x-y)).
void Comparator::less_than_mod_5(Ctxt& ctxt_res, const Ctxt& ctxt_x, const Ctxt& ctxt_y) const
{
	// Comp(x,y) = -(x+1) y(x-y) (x(x+1) - y(x-y))
	cout << "Compute comparison polynomial" << endl;

	// y(x - y)
	Ctxt y_x_min_y = ctxt_x;
	y_x_min_y -= ctxt_y;
	y_x_min_y.multiplyBy(ctxt_y);

	// x + 1
	Ctxt x_plus_1 = ctxt_x;
	x_plus_1.addConstant(ZZ(1));

	// x * (x+1)
	ctxt_res = ctxt_x;
	ctxt_res.multiplyBy(x_plus_1);

	// x * (x+1) - y * (x-y)
	ctxt_res -= y_x_min_y;

	// y * (x-y) * (x * (x+1) - y * (x-y))
	ctxt_res.multiplyBy(y_x_min_y);

	// -(x+1) * y * (x-y) * (x * (x+1) - y * (x-y))
	ctxt_res.multiplyBy(x_plus_1);
	ctxt_res.negate();

	if(m_verbose)
	{
		print_decrypted(ctxt_res);
		cout << endl;
	}
}

// Comparison circuit for p = 7:
// Comp(x,y) = -y(x-y)(x+1)(x(x+1)(x(x+1)+3) + 5y(x-y)(x(x+1)+2x+3y(x-y))).
void Comparator::less_than_mod_7(Ctxt& ctxt_res, const Ctxt& ctxt_x, const Ctxt& ctxt_y) const
{
	// Comp(x,y) = -y(x-y)(x+1)(x(x+1)(x(x+1)+3) + 5y(x-y)(x(x+1)+2x+3y(x-y)))
	cout << "Compute comparison polynomial" << endl;

	// x
	Ctxt y_x_min_y = ctxt_x;
	// x - y
	y_x_min_y -= ctxt_y;
	// y(x-y)
	y_x_min_y.multiplyBy(ctxt_y);

	// x + 1
	Ctxt x_plus_1 = ctxt_x;
	x_plus_1.addConstant(ZZ(1));

	// x(x+1)
	Ctxt x_x_plus_1 = x_plus_1;
	x_x_plus_1.multiplyBy(ctxt_x);

	// x(x+1)(x(x+1)+3)
	Ctxt tmp = x_x_plus_1;
	tmp.addConstant(ZZ(3));
	tmp.multiplyBy(x_x_plus_1);
	ctxt_res = tmp;

	// x(x+1) + 2x + 3y(x-y)
	tmp = y_x_min_y;
	tmp.multByConstant(ZZ(3));
	tmp += x_x_plus_1;
	tmp += ctxt_x;
	tmp += ctxt_x;

	// 5y(x-y)(x(x+1) + 2x + 3y(x-y))
	tmp.multiplyBy(y_x_min_y);
	tmp.multByConstant(ZZ(5));

	// (x^2+x)(x^2+x+3) + 5y(x-y)(x^2+x+2x+3y(x-y))
	ctxt_res += tmp;

	// -y(x-y)(x+1)((x^2+x)(x^2+x+3) + 5y(x-y)(x^2+x+2x+3y(x-y)))
	ctxt_res.multiplyBy(y_x_min_y);
	ctxt_res.multiplyBy(x_plus_1);
	ctxt_res.negate();

	if(m_verbose)
	{
		print_decrypted(ctxt_res);
		cout << endl;
	}
}
// Generic bivariate comparison circuit for p > 7 (and p <= 31):
// evaluates Comp(x,y) = Y * (x+1) * sum_i f_i(x) * Y^i with Y = y(x-y),
// where the coefficient polynomials f_i come from the precomputed table
// `fcoefs` (defined elsewhere; assumed indexed by p — confirm).
void Comparator::less_than_mod_any(Ctxt& ctxt_res, const Ctxt& ctxt_x, const Ctxt& ctxt_y) const
{
	cout << "Compute comparison polynomial" << endl;

	Ctxt Y = ctxt_x;
	// x - y
	Y -= ctxt_y;
	// Y = y(x-y)
	Y.multiplyBy(ctxt_y);

	Ctxt x_plus_1 = ctxt_x;
	// x+1
	x_plus_1.addConstant(ZZ(1));

	unsigned long p = m_context.getP();

	// number of Y powers with non-constant coefficient polynomials
	unsigned long y_powers = ((p-3) >> 1);
	// powers of x (degrees 1..p-3)
	DynamicCtxtPowers x_powers(ctxt_x, p-3);
	// powers of Y (degrees 1..y_powers)
	DynamicCtxtPowers Y_powers(Y, y_powers);
	Ctxt Ypow(m_pk);

	Ctxt fx(m_pk);

	// evaluate each f_i at x, multiply by Y^i and accumulate
	vector<ZZX> fpolys(y_powers);
	for (size_t iPoly = 0; iPoly < y_powers; iPoly++)
	{
		// fcoefs stores coefficients of x^1, x^2, ... (no constant term)
		for (size_t iCoef = 0; iCoef < fcoefs[p][iPoly].size(); iCoef++)
		{
			SetCoeff(fpolys[iPoly], iCoef+1, fcoefs[p][iPoly][iCoef]);
		}
		if(iPoly == 0)
		{
			simplePolyEval(ctxt_res, fpolys[iPoly], x_powers);
		}
		else
		{
			simplePolyEval(fx, fpolys[iPoly], x_powers);
			Ypow = Y_powers.getPower(iPoly);
			fx.multiplyBy(Ypow);
			ctxt_res += fx;
		}
	}

	// top term: c * Y^y_powers (constant coefficient polynomial)
	fx = Y_powers.getPower(y_powers);
	fx.multByConstant(ZZ(fcoefs[p][y_powers][0]));
	ctxt_res += fx;

	// (x+1)*f(x)
	ctxt_res.multiplyBy(x_plus_1);
	// Y*(x+1)*f(x)
	ctxt_res.multiplyBy(Y);

	if(m_verbose)
	{
		print_decrypted(ctxt_res);
		cout << endl;
	}
}
// Evaluate the univariate less-than polynomial at x via baby-step/giant-step
// (Paterson-Stockmeyer) on powers of x^2, then multiply by x and add the
// top term ((p+1)/2) * x^{p-1}. As a by-product, ctxt_p_1 receives
// x^{p-1} (up to the final multiplication by x), which the caller reuses
// for the equality test. For p = 3 a short explicit circuit is used.
void Comparator::evaluate_univar_less_poly(Ctxt& ret, Ctxt& ctxt_p_1, const Ctxt& x) const
{
	HELIB_NTIMER_START(ComparisonCircuitUnivar);
	// get p
	ZZ p = ZZ(m_context.getP());

	if (p > ZZ(3)) //if p > 3, use the generic Paterson-Stockmeyer strategy
	{
		// the polynomial is odd, so it is evaluated in z^2
		// z^2
		Ctxt x2 = x;
		x2.square();

		DynamicCtxtPowers babyStep(x2, m_bs_num_comp);
		const Ctxt& x2k = babyStep.getPower(m_bs_num_comp);

		DynamicCtxtPowers giantStep(x2k, m_gs_num_comp);

		// Special case when #giant_steps is a power of two
		if (m_gs_num_comp == (1L << NextPowerOfTwo(m_gs_num_comp)))
		{
			//cout << "I'm computing degPowerOfTwo" << endl;
			degPowerOfTwo(ret, m_univar_less_poly, m_bs_num_comp, babyStep, giantStep);
		}
		else
		{
			recursivePolyEval(ret, m_univar_less_poly, m_bs_num_comp, babyStep, giantStep);

			// undo the monic normalization done in compute_poly_params
			if (!IsOne(m_top_coef_comp))
			{
				ret.multByConstant(m_top_coef_comp);
			}

			if (!IsZero(m_extra_coef_comp))
			{ // if we added a term, now is the time to subtract back
				Ctxt topTerm = giantStep.getPower(m_gs_num_comp);
				topTerm.multByConstant(m_extra_coef_comp);
				ret -= topTerm;
			}
		}
		// restore the odd polynomial: multiply g(x^2) by x
		ret.multiplyBy(x);

		// top term x^{p-1} = (x^2)^{giant_index * #bs + baby_index}
		// TODO: depth here is not optimal
		Ctxt top_term = babyStep.getPower(m_baby_index);
		top_term.multiplyBy(giantStep.getPower(m_giant_index));

		// hand x^{p-1} back to the caller (used for equality)
		ctxt_p_1 = top_term;

		top_term.multByConstant(ZZ((p+1)>> 1));

		ret += top_term;

		/*
		cout << "Computed baby steps" << endl;
		for(int i = 0; i < babyStep.size(); i++)
		{
			cout << i + 1 << ' ' << babyStep.isPowerComputed(i+1) << endl;
		}

		cout << "Computed giant steps" << endl;
		for(int i = 0; i < giantStep.size(); i++)
		{
			cout << i + 1 << ' ' << giantStep.isPowerComputed(i+1) << endl;
		}
		*/
	}
	else //circuit for p=3
	{
		// Comp(z) = z + 2*z^2
		ret = x;

		ctxt_p_1 = x;
		ctxt_p_1.square();

		Ctxt top_term = ctxt_p_1;
		top_term.multByConstant(ZZ(2));

		ret += top_term;
	}
	HELIB_NTIMER_STOP(ComparisonCircuitUnivar);
}
// Compute min and max of x and y simultaneously:
//   min = ((p+1)/2)(x+y) + g(z^2),  max = ((p+1)/2)(x+y) - g(z^2),
// where z = x - y and g is the univariate min/max polynomial, evaluated
// with the same baby-step/giant-step machinery as the less-than circuit.
// For p = 3, g(z^2) = z^2 directly.
void Comparator::evaluate_min_max_poly(Ctxt& ctxt_min, Ctxt& ctxt_max, const Ctxt& ctxt_x, const Ctxt& ctxt_y) const
{
	HELIB_NTIMER_START(MinMaxCircuitUnivar);
	// get p
	ZZ p = ZZ(m_context.getP());

	// Subtraction z = x - y
	cout << "Subtraction" << endl;
	Ctxt ctxt_z = ctxt_x;
	ctxt_z -= ctxt_y;

	if(m_verbose)
	{
		print_decrypted(ctxt_z);
		cout << endl;
	}

	// z^2 (the polynomial is even, so it is evaluated in z^2)
	Ctxt ctxt_z2 = ctxt_z;
	ctxt_z2.square();

	if (p > ZZ(3)) //if p > 3, use the generic Paterson-Stockmeyer strategy
	{
		DynamicCtxtPowers babyStep(ctxt_z2, m_bs_num_min);
		const Ctxt& ctxt_z2k = babyStep.getPower(m_bs_num_min);

		DynamicCtxtPowers giantStep(ctxt_z2k, m_gs_num_min);

		// compute g(z^2)
		Ctxt g_z2 = Ctxt(ctxt_z2.getPubKey());;
		// Special case when #giant_steps is a power of two
		if (m_gs_num_min == (1L << NextPowerOfTwo(m_gs_num_min)))
		{
			//cout << "I'm computing degPowerOfTwo" << endl;
			degPowerOfTwo(g_z2, m_univar_min_max_poly, m_bs_num_min, babyStep, giantStep);
		}
		else
		{
			recursivePolyEval(g_z2, m_univar_min_max_poly, m_bs_num_min, babyStep, giantStep);

			// undo the monic normalization done in compute_poly_params
			if (!IsOne(m_top_coef_min))
			{
				g_z2.multByConstant(m_top_coef_min);
			}

			if (!IsZero(m_extra_coef_min))
			{ // if we added a term, now is the time to subtract back
				Ctxt topTerm = giantStep.getPower(m_gs_num_min);
				topTerm.multByConstant(m_extra_coef_min);
				g_z2 -= topTerm;
			}
		}

		// last term: ((p+1)/2) * (x + y)
		Ctxt last_term = ctxt_x;
		last_term += ctxt_y;
		last_term.multByConstant(ZZ((p+1)>> 1));

		ctxt_min = last_term;
		ctxt_min += g_z2;
		ctxt_max = last_term;
		ctxt_max -= g_z2;

		/*
		cout << "Computed baby steps" << endl;
		for(int i = 0; i < babyStep.size(); i++)
		{
			cout << i + 1 << ' ' << babyStep.isPowerComputed(i+1) << endl;
		}

		cout << "Computed giant steps" << endl;
		for(int i = 0; i < giantStep.size(); i++)
		{
			cout << i + 1 << ' ' << giantStep.isPowerComputed(i+1) << endl;
		}
		*/
	}
	else //circuit for p=3
	{
		// last term: ((p+1)/2) * (x + y)
		Ctxt last_term = ctxt_x;
		last_term += ctxt_y;
		last_term.multByConstant(ZZ((p+1)>> 1));

		// for p = 3 the polynomial g(z^2) is just z^2
		ctxt_min = last_term;
		ctxt_min += ctxt_z2;
		ctxt_max = last_term;
		ctxt_max -= ctxt_z2;
	}
	HELIB_NTIMER_STOP(MinMaxCircuitUnivar);
}
31\");\n }\n\n if(p == 2)\n {\n less_than_mod_2(ctxt_res, ctxt_x, ctxt_y);\n }\n \n if(p == 3)\n {\n \tless_than_mod_3(ctxt_res, ctxt_x, ctxt_y);\n }\n\n if(p == 5)\n {\n \tless_than_mod_5(ctxt_res, ctxt_x, ctxt_y);\n }\n\n if(p == 7)\n {\n \tless_than_mod_7(ctxt_res, ctxt_x, ctxt_y);\n }\n\n if(p > 7)\n {\n less_than_mod_any(ctxt_res, ctxt_x, ctxt_y);\n }\n\n if(m_verbose)\n {\n print_decrypted(ctxt_res);\n cout << endl;\n }\n\n HELIB_NTIMER_STOP(ComparisonCircuitBivar);\n}\n\nvoid Comparator::less_than_bivar_tan(Ctxt& ctxt_res, const Ctxt& ctxt_x, const Ctxt& ctxt_y) const\n{\n\tcout << \"Compute Tan's comparison polynomial\" << endl;\n\n\tlong p = m_context.getP();\n\n\tDynamicCtxtPowers x_powers(ctxt_x, p-1);\n\tDynamicCtxtPowers y_powers(ctxt_y, p-1);\n\n\tctxt_res = y_powers.getPower(p-1);\n\n\tfor (long i = 1; i < p; i++)\n\t{\n\t\t// zero ciphertext\n\t\tCtxt sum = Ctxt(ctxt_x.getPubKey());\n\t\tfor (long j = 1; j < p; j++)\n\t\t{\n\t\t\tif (m_bivar_less_coefs[i][j] == ZZ(0))\n\t\t\t\tcontinue;\n\t\t\tCtxt tmp = y_powers.getPower(j);\n\t\t\ttmp.multByConstant(m_bivar_less_coefs[i][j]);\n\t\t\tsum += tmp;\n\t\t}\n\t\tsum.multiplyBy(x_powers.getPower(i));\n\t\tctxt_res += sum;\n\t}\n}\n\nvoid Comparator::is_zero(Ctxt& ctxt_res, const Ctxt& ctxt_z, long pow) const\n{\n HELIB_NTIMER_START(EqualityCircuit);\n\n ctxt_res = ctxt_z;\n\n //compute mapTo01: (z_i)^{p^d-1}\n //cout << \"Mapping to 0 and 1\" << endl;\n mapTo01_subfield(ctxt_res, pow);\n\n if(m_verbose)\n {\n print_decrypted(ctxt_res);\n cout << endl;\n }\n\n //cout << \"Computing NOT\" << endl;\n //compute 1 - mapTo01(z_i)\n ctxt_res.negate();\n ctxt_res.addConstant(ZZ(1));\n\n if(m_verbose)\n {\n print_decrypted(ctxt_res);\n cout << endl;\n }\n\n HELIB_NTIMER_STOP(EqualityCircuit);\n}\n\nvoid Comparator::compare(Ctxt& ctxt_res, const Ctxt& ctxt_x, const Ctxt& ctxt_y) const\n{\n\tHELIB_NTIMER_START(Comparison);\n\n\tvector ctxt_less_p;\n\tvector ctxt_eq_p;\n\n\t// bivariate circuit\n\tif (m_type == BI 
// Full comparison x < y over vectors of digits.
// 1. Per slot-coefficient digit, compute less-than and equality bits:
//    - BI/TAN: extract digits of x and y separately and use the bivariate
//      circuits (less_than_bivar / is_zero on the difference);
//    - UNI: extract digits of z = x - y and evaluate the univariate
//      polynomial, which yields both bits at once.
// 2. Combine the m_slotDeg digit results lexicographically.
// 3. If m_expansionLen > 1, combine across slots of a batch with running
//    equality products (shift_and_mul) and a final shift_and_add.
// NOTE(review): the m_expansionLen == 1 early return leaves the Comparison
// timer running — confirm whether that is intended.
void Comparator::compare(Ctxt& ctxt_res, const Ctxt& ctxt_x, const Ctxt& ctxt_y) const
{
	HELIB_NTIMER_START(Comparison);

	vector<Ctxt> ctxt_less_p;
	vector<Ctxt> ctxt_eq_p;

	// bivariate circuit
	if (m_type == BI || m_type == TAN)
	{
		//cout << "Extraction" << endl;
		// extract mod p coefficients
		vector<Ctxt> ctxt_x_p;
		extract_mod_p(ctxt_x_p, ctxt_x);

		if(m_verbose)
		{
			for (long iCoef = 0; iCoef < m_slotDeg; iCoef++)
			{
				cout << "Ctxt x with coefficient " << iCoef << endl;
				print_decrypted(ctxt_x_p[iCoef]);
				cout << endl;
			}
		}

		vector<Ctxt> ctxt_y_p;
		extract_mod_p(ctxt_y_p, ctxt_y);

		if(m_verbose)
		{
			for (long iCoef = 0; iCoef < m_slotDeg; iCoef++)
			{
				cout << "Ctxt y with coefficient " << iCoef << endl;
				print_decrypted(ctxt_y_p[iCoef]);
				cout << endl;
			}
		}

		//cout << "Compute the less-than function modulo p" << endl;
		for (long iCoef = 0; iCoef < m_slotDeg; iCoef++)
		{
			Ctxt ctxt_tmp = Ctxt(ctxt_x.getPubKey());
			less_than_bivar(ctxt_tmp, ctxt_x_p[iCoef], ctxt_y_p[iCoef]);
			ctxt_less_p.push_back(ctxt_tmp);
		}

		//cout << "Compute the equality function modulo p" << endl;
		for (long iCoef = 0; iCoef < m_slotDeg; iCoef++)
		{
			// Subtraction z = x - y
			//cout << "Subtraction" << endl;
			Ctxt ctxt_z = ctxt_x_p[iCoef];
			ctxt_z -= ctxt_y_p[iCoef];
			Ctxt ctxt_tmp = Ctxt(ctxt_z.getPubKey());
			is_zero(ctxt_tmp, ctxt_z);
			ctxt_eq_p.push_back(ctxt_tmp);
		}
	}
	else // univariate circuit
	{
		// Subtraction z = x - y
		//cout << "Subtraction" << endl;
		Ctxt ctxt_z = ctxt_x;
		ctxt_z -= ctxt_y;

		if(m_verbose)
		{
			print_decrypted(ctxt_z);
			cout << endl;
		}

		// extract mod p coefficients
		//cout << "Extraction" << endl;
		vector<Ctxt> ctxt_z_p;
		extract_mod_p(ctxt_z_p, ctxt_z);

		if(m_verbose)
		{
			for (long iCoef = 0; iCoef < m_slotDeg; iCoef++)
			{
				cout << "Ctxt x with coefficient " << iCoef << endl;
				print_decrypted(ctxt_z_p[iCoef]);
				cout << endl;
			}
		}

		//cout << "Compute the less-than and equality functions modulo p" << endl;
		for (long iCoef = 0; iCoef < m_slotDeg; iCoef++)
		{
			Ctxt ctxt_tmp = Ctxt(ctxt_z.getPubKey());
			Ctxt ctxt_tmp_eq = Ctxt(ctxt_z.getPubKey());

			// compute polynomial function for 'z < 0';
			// ctxt_tmp_eq receives z^{p-1} as a by-product
			//cout << "Compute univariate comparison polynomial" << endl;
			evaluate_univar_less_poly(ctxt_tmp, ctxt_tmp_eq, ctxt_z_p[iCoef]);

			if(m_verbose)
			{
				cout << "Result of the less-than function" << endl;
				print_decrypted(ctxt_tmp);
				cout << endl;
			}

			ctxt_less_p.push_back(ctxt_tmp);

			//cout << "Computing NOT" << endl;
			//compute 1 - mapTo01(r_i*(x_i - y_i))
			ctxt_tmp_eq.negate();
			ctxt_tmp_eq.addConstant(ZZ(1));

			if(m_verbose)
			{
				cout << "Result of the equality function" << endl;
				print_decrypted(ctxt_tmp_eq);
				cout << endl;
			}

			ctxt_eq_p.push_back(ctxt_tmp_eq);
		}
	}

	// lexicographic combination of the digit results, most significant
	// digit first: less = less_d  +  eq_d * less_{d-1}  +  ...
	//cout << "Compare digits" << endl;
	Ctxt ctxt_less = ctxt_less_p[m_slotDeg-1];
	Ctxt ctxt_eq = ctxt_eq_p[m_slotDeg-1];

	for (long iCoef = m_slotDeg-2; iCoef >= 0; iCoef--)
	{
		Ctxt tmp = ctxt_eq;
		tmp.multiplyBy(ctxt_less_p[iCoef]);
		ctxt_less += tmp;

		ctxt_eq.multiplyBy(ctxt_eq_p[iCoef]);
	}

	if(m_verbose)
	{
		cout << "Comparison results" << endl;
		print_decrypted(ctxt_less);
		cout << endl;

		cout << "Equality results" << endl;
		print_decrypted(ctxt_eq);
		cout << endl;
	}

	// single-digit numbers: the digit result is the final result
	if(m_expansionLen == 1)
	{
		ctxt_res = ctxt_less;
		return;
	}


	//compute running products: prod_i 1 - (x_i - y_i)^{p^d-1}
	//cout << "Rotating and multiplying slots with equalities" << endl;
	shift_and_mul(ctxt_eq, 0);

	if(m_verbose)
	{
		print_decrypted(ctxt_eq);
		cout << endl;
	}

	//Remove the least significant digit and shift to the left
	//cout << "Remove the least significant digit" << endl;
	batch_shift_for_mul(ctxt_eq, 0, -1);

	if(m_verbose)
	{
		print_decrypted(ctxt_eq);
		cout << endl;
	}

	//cout << "Final result" << endl;

	ctxt_res = ctxt_eq;
	ctxt_res.multiplyBy(ctxt_less);
	shift_and_add(ctxt_res, 0);

	if(m_verbose)
	{
		print_decrypted(ctxt_res);
		cout << endl;
	}

	if(m_verbose)
	{
		cout << "Input x: " << endl;
		print_decrypted(ctxt_x);
		cout << endl;
		cout << "Input y: " << endl;
		print_decrypted(ctxt_y);
		cout << endl;
	}

	HELIB_NTIMER_STOP(Comparison);
}
compute polynomial function for 'z < 0'\n\t\tcout << \"Compute univariate min/max polynomial\" << endl;\n\t\tevaluate_min_max_poly(ctxt_tmp_min, ctxt_tmp_max, ctxt_x_p[iCoef], ctxt_y_p[iCoef]);\n\n\t\tif(m_verbose)\n\t\t{\n\t\t cout << \"Result of the min function\" << endl;\n\t\t print_decrypted(ctxt_tmp_min);\n\t\t cout << endl;\n\t\t}\n\n\t\tif(m_verbose)\n\t\t{\n\t\t cout << \"Result of the max function\" << endl;\n\t\t print_decrypted(ctxt_tmp_max);\n\t\t cout << endl;\n\t\t}\n\n\t\tctxt_min_p.push_back(ctxt_tmp_min);\n\t\tctxt_max_p.push_back(ctxt_tmp_max);\n\t}\n\n\tctxt_min = ctxt_min_p[0];\n\tctxt_max = ctxt_max_p[0];\n\n\tfor (long iCoef = 1; iCoef < m_slotDeg; iCoef++)\n\t{\n\t\tvector x_power(nSlots, ZZX(INIT_MONO, iCoef, 1));\n\t\tZZX x_power_ptxt;\n\t\tea.encode(x_power_ptxt, x_power);\n\n\t\t// agregate minimum values\n\t\tCtxt tmp = ctxt_min_p[iCoef];\n\t\ttmp.multByConstant(x_power_ptxt);\n\t\tctxt_min += tmp;\n\n\t\t// agregate maximum values\n\t\ttmp = ctxt_max_p[iCoef];\n\t\ttmp.multByConstant(x_power_ptxt);\n\t\tctxt_max += tmp;\n\t}\n\n\tHELIB_NTIMER_STOP(MinMaxDigit);\n}\n\nvoid Comparator::min_max(Ctxt& ctxt_min, Ctxt& ctxt_max, const Ctxt& ctxt_x, const Ctxt& ctxt_y) const\n{\n\tHELIB_NTIMER_START(MinMax);\n\tif(m_type == UNI && m_expansionLen == 1 && m_slotDeg == 1)\n\t{\n\t\tmin_max_digit(ctxt_min, ctxt_max, ctxt_x, ctxt_y);\n\t\treturn;\n\t}\n\n\tCtxt ctxt_z = ctxt_x;\n\tctxt_z -= ctxt_y;\n\n\tCtxt ctxt_tmp = Ctxt(ctxt_z.getPubKey());\n\tcompare(ctxt_tmp, ctxt_x, ctxt_y);\n\tctxt_tmp.multiplyBy(ctxt_z);\n\n\tctxt_min = ctxt_y;\n\tctxt_min += ctxt_tmp;\n\n\tctxt_max = ctxt_x;\n\tctxt_max -= ctxt_tmp;\n\n\tif(m_verbose)\n\t{\n\t\tcout << \"Minimum\" << endl;\n\t\tprint_decrypted(ctxt_min);\n\t\tcout << endl;\n\t}\n\n\tif(m_verbose)\n\t{\n\t\tcout << \"Maximum\" << endl;\n\t\tprint_decrypted(ctxt_max);\n\t\tcout << endl;\n\t}\n\n\tif(m_verbose)\n {\n cout << \"Input x: \" << endl;\n print_decrypted(ctxt_x);\n cout << endl;\n cout << \"Input 
y: \" << endl;\n print_decrypted(ctxt_y);\n cout << endl;\n }\n\tHELIB_NTIMER_STOP(MinMax);\n}\n\nvoid Comparator::array_min(Ctxt& ctxt_res, const vector& ctxt_in, long depth) const\n{\n\tHELIB_NTIMER_START(ArrayMin);\n\n\tif (depth < 0)\n\t\tthrow helib::LogicError(\"depth parameter must be non-negative\");\n\n\tcout << \"Computing the minimum of an array\" << endl;\n\n\tsize_t input_len = ctxt_in.size();\n\n\tvector ctxt_res_vec;\n\tfor (size_t i = 0; i < input_len; i++)\n\t{\n\t\tctxt_res_vec.push_back(ctxt_in[i]);\n\t}\n\n\tsize_t cur_len = input_len;\n\tlong level = depth;\n\n\twhile(cur_len > 1 && level > 0)\n\t{\n\t\tcout << \"Comparison level: \" << depth-level << endl;\n\t\t// compare x[i] and x[n-1-i] where n is the length of ctxt_res_vec\n\t\tfor (size_t i = 0; i < (cur_len >> 1); i++)\n\t\t{\n\t\t\tif(i != cur_len - 1 - i)\n\t\t\t{\n\t\t\t\tcout << \"Comparing ciphertexts \" << i << \" and \" << cur_len - 1 - i << endl;\n\t\t\t\tmin_max(ctxt_res_vec[i], ctxt_res_vec[cur_len - 1 - i], ctxt_res_vec[i], ctxt_res_vec[cur_len - 1 - i]);\n\t\t\t}\n\t\t}\n\t\tcur_len = (cur_len >> 1) + (cur_len % 2);\n\t\tctxt_res_vec.resize(cur_len, Ctxt(m_pk));\n\t\tlevel--;\n\t}\n\n\tif(cur_len > 1)\n\t{\n\t\t// plaintext modulus\n \t\tlong p = m_context.getP();\n\t\t// multiplications in the equality circuit\n\t\tlong eq_mul_num = static_cast(floor(log2(p-1))) + weight(ZZ(p-1)) - 1;\n\t\tlong eq_depth = static_cast(ceil(log2(p-1)));\n\t\tlong prod_depth = static_cast(ceil(log2(cur_len-1)));\n\n\t\tif ((((cur_len - 2 > eq_mul_num) && (eq_depth == prod_depth)) || (eq_depth < prod_depth)) && cur_len <= p)\n\t\t{\n\t\t\tcout << \"Computing minimum via equality\" << endl;\n\t\t\tcout << \"Mult. 
of equality: \" << eq_mul_num << endl;\n\t\t\tcout << \"Depth of equality: \" << eq_depth << endl;\n\t\t\tcout << \"Depth of product: \" << prod_depth << endl;\n\t\t\t// create a table with all pairwise comparisons and compute the Hamming weight of every row\n\t\t\tvector ham_weights;\n\t\t\tget_sorting_index(ham_weights, ctxt_res_vec);\n\n\t\t\tcout << \"Computing the minimum\" << endl;\n\t\t\tctxt_res = Ctxt(m_pk);\n\t\t\tfor(size_t i = 0; i < ctxt_res_vec.size(); i++)\n\t\t\t{\n\t\t\t\t//compare the Hamming weight of the jth row with i\n\t\t\t\tCtxt tmp_prod = ham_weights[i];\n\t\t\t\ttmp_prod.addConstant(ZZX(-(cur_len-1)));\n\t\t\t\tmapTo01_subfield(tmp_prod, 1);\n\t\t\t\ttmp_prod.negate();\n\t\t\t\ttmp_prod.addConstant(ZZX(1));\n\n\t\t\t\t//multiply by the jth input ciphertext\n\t\t\t\ttmp_prod.multiplyBy(ctxt_res_vec[i]);\n\t\t\t\tif(i == 0)\n\t\t\t\t\tctxt_res = tmp_prod;\n\t\t\t\telse\n\t\t\t\t\tctxt_res += tmp_prod;\n\t\t\t}\n\t\t}\n\t\telse \n\t\t{\n\t\t\tcout << \"Computing minimum via punctured products\" << endl;\n\t\t\tcout << \"Mult. 
of equality: \" << eq_mul_num << endl;\n\t\t\tcout << \"Depth of equality: \" << eq_depth << endl;\n\t\t\tcout << \"Depth of product: \" << prod_depth << endl;\n\t\t\tvector> ctxt_products;\n\t\t\t// compute the product of every row\n\t\t\tfor(size_t i = 0; i < ctxt_res_vec.size(); i++)\n\t\t\t{\n\t\t\t\tvector ctxt_vec;\n\t\t\t\tctxt_products.push_back(ctxt_vec);\n\t\t\t}\n\n\t\t\tcout << \"Computing the comparison table\" << endl;\n\t\t\tfor (size_t i = 0; i < ctxt_res_vec.size() - 1; i++)\n\t\t\t{\n\t\t\t\tcout << \"Computing Row \" << i << endl;\n\t\t\t\tfor(size_t j = i + 1; j < ctxt_res_vec.size(); j++)\n\t\t\t\t{\n\t\t\t\t\tcout << \"Computing Column \" << j << endl;\n\t\t\t\t\t// compute upper diagonal entries of the comparison table and multiply them\n\t\t\t\t\tCtxt comp_col = Ctxt(m_pk);\n\t\t\t\t\tcompare(comp_col, ctxt_res_vec[i], ctxt_res_vec[j]);\n\n\t\t\t\t\tif (ctxt_products[i].empty())\n\t\t\t\t\t{\n\t\t\t\t\t\tctxt_products[i].push_back(comp_col);\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tlong wt = weight(ZZ(j));\n\t\t\t\t\t\tint len_i = ctxt_products[i].size();\n\t\t\t\t\t\tif (wt > len_i)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tctxt_products[i].push_back(comp_col);\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tctxt_products[i][len_i - 1].multiplyBy(comp_col);\n\t\t\t\t\t\t\tfor (int k = len_i - 2; k >= (wt-1); k--)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tctxt_products[i][k].multiplyBy(ctxt_products[i][k+1]);\n\t\t\t\t\t\t\t\tctxt_products[i].pop_back();\t\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t// compute lower diagonal entries of the comparison table by transposition and logical negation of upper diagonal entries\n\t\t\t\t\t//NOT the result to multiply to the jth row\n\t\t\t\t\tcomp_col.negate();\n\t\t\t\t\tcomp_col.addConstant(ZZ(1));\n\n\t\t\t\t\tif (ctxt_products[j].empty())\n\t\t\t\t\t{\n\t\t\t\t\t\tctxt_products[j].push_back(comp_col);\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tlong wt 
// Computes an encrypted "sorting index" for every input ciphertext.
//
// ctxt_out[i] becomes an encryption of the Hamming weight of row i of the
// pairwise comparison table, i.e. the number of inputs that ctxt_in[i]
// compares below (lower-triangle entries are non-strict, so ties are broken
// by input position). Only the upper triangle is evaluated with compare();
// each lower-triangle entry is obtained for free as the logical negation
// 1 - b of the transposed entry.
// NOTE(review): the resulting index is the element's position in descending
// order — presumably what sort()/array_min() rely on; confirm against callers.
void Comparator::get_sorting_index(vector<Ctxt>& ctxt_out, const vector<Ctxt>& ctxt_in) const
{
	ctxt_out.clear();

	// length of the input vector
	size_t input_len = ctxt_in.size();

	// plaintext modulus
	long p = m_context.getP();

	// every index must be representable in a single mod-p slot
	if (input_len > p)
		throw helib::LogicError("The number of ciphertexts cannot be larger than the plaintext modulus");

	// compute the Hamming weight of every row
	for(size_t i = 0; i < input_len; i++)
	{
		//initialize Hamming weights to zero (a freshly constructed Ctxt is an encryption of 0)
		Ctxt ctxt_tmp = Ctxt(ctxt_in[0].getPubKey());
		ctxt_out.push_back(ctxt_tmp);
	}

	cout << "Computing the comparison table" << endl;
	for (size_t i = 0; i < input_len - 1; i++)
	{
		cout << "Computing Row " << i << endl;
		for(size_t j = i + 1; j < input_len; j++)
		{
			cout << "Computing Column " << j << endl;
			// compute upper diagonal entries of the comparison table and sum them;
			// comp_col_j encrypts the bit [ctxt_in[i] < ctxt_in[j]]
			Ctxt comp_col_j = Ctxt(ctxt_in[0].getPubKey());
			compare(comp_col_j, ctxt_in[i], ctxt_in[j]);
			ctxt_out[i] += comp_col_j;

			// compute lower diagonal entries of the comparison table by transposition
			// and logical negation of upper diagonal entries
			//NOT the result to add to the jth row
			comp_col_j.negate();
			comp_col_j.addConstant(ZZ(1));

			// add lower diagonal entries to Hamming weight accumulators of related rows
			ctxt_out[j] += comp_col_j;
		}
	}
}
table\" << endl;\n\tfor (size_t i = 0; i < input_len - 1; i++)\n\t{\n\t\tcout << \"Computing Row \" << i << endl;\n\t\tfor(size_t j = i + 1; j < input_len; j++)\n\t\t{\n\t\t\tcout << \"Computing Column \" << j << endl;\n\t\t\t// compute upper diagonal entries of the comparison table and sum them\n\t\t\tCtxt comp_col_j = Ctxt(ctxt_in[0].getPubKey());\n\t\t\tcompare(comp_col_j, ctxt_in[i], ctxt_in[j]);\n\t\t\tham_weights[i] += comp_col_j;\n\n\t\t\t// compute lower diagonal entries of the comparison table by transposition and logical negation of upper diagonal entries\n\t\t\t//NOT the result to add to the jth row\n\t\t\tcomp_col_j.negate();\n\t\t\tcomp_col_j.addConstant(ZZ(1));\n\n\t\t\t// add lower diagonal entries to Hamming weight accumulators of related rows\n\t\t\tham_weights[j] += comp_col_j;\n\t\t}\n\t}\n\t*/\n\n\t// print Hamming weights\n\t/*\n\tfor(size_t i = 0; i < input_len; i++)\n\t{\n\t\tcout << i << \" Row:\" << endl;\n\t\tprint_decrypted(ham_weights[i]);\n \tcout << endl;\n\t}\n\t*/\n\n\tif(eq_mul_num * input_len <= p - 2)\n\t//if(true)\n\t{\n\t\tfor(size_t i = 0; i < input_len; i++)\n\t\t{\n\t\t\tcout << \"Computing Element \" << i << endl;\n\t\t\tCtxt tmp_sum = Ctxt(ctxt_in[i].getPubKey());\n\t\t\tfor(size_t j = 0; j < input_len; j++)\n\t\t\t{\n\t\t\t\t//compare the Hamming weight of the jth row with i\n\t\t\t\tCtxt tmp_prod = ham_weights[j];\n\t\t\t\ttmp_prod.addConstant(ZZX(-i));\n\t\t\t\tmapTo01_subfield(tmp_prod, 1);\n\t\t\t\ttmp_prod.negate();\n\t\t\t\ttmp_prod.addConstant(ZZX(1));\n\n\t\t\t\t//multiply by the jth input ciphertext\n\t\t\t\ttmp_prod.multiplyBy(ctxt_in[j]);\n\t\t\t\ttmp_sum += tmp_prod;\n\t\t\t}\n\t\t\tctxt_out.push_back(tmp_sum);\n\t\t}\n\t}\n\telse\n\t{\n\t\t// equality sums\n\t\tvector eq_sums;\n\n\t\t// fill ctxt_out with zeros\n\t\tfor(size_t i = 0; i < input_len; i++)\n\t\t{\n\t\t\tctxt_out.push_back(Ctxt(ctxt_in[0].getPubKey()));\n\t\t\teq_sums.push_back(Ctxt(ctxt_in[0].getPubKey()));\n\t\t}\n\n\t\tfor (size_t i = 0; i < 
input_len; i++)\n\t\t{\n\t\t\tcout << \"Adding element \" << i << endl;\n\n\t\t\t// hw_i^j, j in [1,p-1]\n\t\t\tDynamicCtxtPowers hw_powers(ham_weights[i], p-1);\n\t\t\t\n\t\t\t// eq_sums[0] = 1 - hw_i^(p-1)\n\t\t\teq_sums[0].clear();\n\t\t\teq_sums[0] = hw_powers.getPower(p-1);\n\t\t\teq_sums[0].negate();\n\t\t\teq_sums[0].addConstant(ZZX(1));\n\n\t\t\t// eq_sums[0] * ctxt_in[i]\n\t\t\teq_sums[0].multiplyBy(ctxt_in[i]);\n\n\t\t\t// sum_i eq_sums[0] * ctxt_in[i]\n\t\t\tctxt_out[0] += eq_sums[0];\t\n\n\t\t\tfor (size_t k = 1; k < input_len; k++)\n\t\t\t{\n\t\t\t\t// zeroize\n\t\t\t\teq_sums[k].clear();\n\n\t\t\t\t// current sorting index\n\t\t\t\tZZ_p k_zzp;\n\t\t\t\tk_zzp.init(ZZ(p));\n\t\t\t\tk_zzp = k;\n\n\t\t\t\tZZ_p k_power;\n\t\t\t\tk_power.init(ZZ(p));\n\n\t\t\t\tfor (int j = 1; j < p; j++)\n\t\t\t\t{\n\t\t\t\t\t// k^(p-1-j) mod p\n\t\t\t\t\tk_power = power(k_zzp, p - 1 - j);\n\t\t\t\t\t// hw_i^j\n\t\t\t\t\tCtxt tmp = hw_powers.getPower(j);\n\t\t\t\t\t// hw_i^j * k^(p-1-j)\n\t\t\t\t\ttmp.multByConstant(rep(k_power));\n\t\t\t\t\t// sum hw_i^j * k^(p-1-j)\n\t\t\t\t\teq_sums[k] += tmp;\n\t\t\t\t}\n\t\t\t\t// add k^(p-1) to eq_sums\n\t\t\t\teq_sums[k].addConstant(rep(power(k_zzp, p-1)));\n\n\t\t\t\t// 1 - sum_(j=0)^(p-1) hw_i^j * k^(p-1-j)\n\t\t\t\teq_sums[k].negate();\n\t\t\t\teq_sums[k].addConstant(ZZX(1));\n\n\t\t\t\t// eq_sums[k] * ctxt_in[i]\n\t\t\t\teq_sums[k].multiplyBy(ctxt_in[i]);\n\n\t\t\t\t// sum_i eq_sums[k] * ctxt_in[i]\n\t\t\t\tctxt_out[k] += eq_sums[k];\n\t\t\t}\n\t\t}\n\t}\n\t\n\n\t// print output ciphertexts\n\t/*\n\tfor(size_t i = 0; i < input_len; i++)\n\t{\n\t\tcout << i << \" ctxt:\" << endl;\n\t\tprint_decrypted(ctxt_out[i]);\n \tcout << endl;\n\t}\n\t*/\n\tHELIB_NTIMER_STOP(Sorting);\n}\n\nvoid Comparator::test_sorting(int num_to_sort, long runs) const\n{\n\t//reset timers\n setTimersOn();\n \n // initialize the random generator\n random_device rd;\n mt19937 eng(rd());\n uniform_int_distribution distr_u;\n uniform_int_distribution 
distr_i;\n\n // get EncryptedArray\n const EncryptedArray& ea = m_context.getEA();\n\n //extract number of slots\n long nslots = ea.size();\n\n //get p\n unsigned long p = m_context.getP();\n\n //order of p\n unsigned long ord_p = m_context.getOrdP();\n\n //amount of numbers in one ciphertext\n unsigned long numbers_size = nslots / m_expansionLen;\n\n // number of slots occupied by encoded numbers\n unsigned long occupied_slots = numbers_size * m_expansionLen;\n\n //encoding base, ((p+1)/2)^d\n //if 2-variable comparison polynomial is used, it must be p^d\n unsigned long enc_base = (p + 1) >> 1;\n if (m_type == BI || m_type == TAN)\n {\n \tenc_base = p;\n }\n\n unsigned long digit_base = power_long(enc_base, m_slotDeg);\n\n //check that field_size^expansion_len fits into 64-bits\n int space_bit_size = static_cast(ceil(m_expansionLen * log2(digit_base)));\n unsigned long input_range = ULONG_MAX;\n if(space_bit_size < 64)\n {\n //input_range = power_long(field_size, expansion_len);\n input_range = power_long(digit_base, m_expansionLen);\n }\n cout << \"Maximal input: \" << input_range << endl;\n\n long min_capacity = 1000;\n long capacity;\n\n for (int run = 0; run < runs; run++)\n {\n printf(\"Run %d started\\n\", run);\n\n // all slots contain the same value\n vector> expected_result;\n for (int i = 0; i < num_to_sort; i++)\n {\n \tvector tmp_vec(occupied_slots,ZZX(INIT_MONO,0,0));\n \texpected_result.push_back(tmp_vec);\n }\n \n // vector of input longs\n vector> input_xs;\n for (int i = 0; i < numbers_size; i++)\n {\n \tvector tmp_vec(num_to_sort,0);\n \tinput_xs.push_back(tmp_vec);\n }\n\n ZZX pol_slot;\n\n //ciphertexts to sort\n vector ctxt_in;\n\n //sorted ciphertexts\n vector ctxt_out;\n\n for (int i = 0; i < num_to_sort; i++)\n {\n\t\t// the plaintext polynomials\n\t\tvector pol_x(nslots);\n\n\t\t//encoding of slots\n\t\tfor (int k = 0; k < numbers_size; k++)\n\t\t{\n\t\t\tunsigned long input_x = distr_u(eng) % input_range;\n\n\t\t\tinput_xs[k][i] = 
input_x;\n\t\t\n\t\t\tif(m_verbose)\n\t\t\t{\n\t\t\t\tcout << \"Input\" << endl;\n\t\t\t\tcout << input_x << endl;\n\t\t\t}\n\n\t\t\tvector decomp_int_x;\n\n\t\t\t//decomposition of input integers\n\t\t\tdigit_decomp(decomp_int_x, input_x, digit_base, m_expansionLen);\n\t\t\tfor (int j = 0; j < m_expansionLen; j++)\n\t\t\t{\n\t\t\t //decomposition of a digit\n\t\t\t int_to_slot(pol_slot, decomp_int_x[j], enc_base);\n\t\t\t pol_x[k * m_expansionLen + j] = pol_slot;\n\t\t\t}\n\t\t}\n\n \tif(m_verbose)\n\t {\n\t cout << \"Input\" << endl;\n\t for(int j = 0; j < nslots; j++)\n\t {\n\t printZZX(cout, pol_x[j], ord_p);\n\t cout << endl;\n\t }\n\t }\n\n\t Ctxt ctxt_x(m_pk);\n \tea.encrypt(ctxt_x, m_pk, pol_x);\n\n \tctxt_in.push_back(ctxt_x);\n }\n\n //cout << \"Input\" << endl;\n for (int i = 0; i < numbers_size; i++)\n {\n \t//for (int j = 0; j < num_to_sort; j++)\n \t//\tcout << input_xs[i][j] << \" \";\n \t//cout << endl;\n \tstd::sort(input_xs[i].begin(), input_xs[i].end());\n }\n\n //cout << \"Expected results\" << endl;\n for (int i = num_to_sort-1; i >= 0; i--)\n {\n\t\tfor (int k = 0; k < numbers_size; k++)\n\t\t{\n\t\t\tvector decomp_int_x;\n\n\t\t\t//decomposition of input integers\n\t\t\tdigit_decomp(decomp_int_x, input_xs[k][i], digit_base, m_expansionLen);\n\t\t\tfor (int j = 0; j < m_expansionLen; j++)\n\t\t\t{\n\t\t\t //decomposition of a digit\n\t\t\t int_to_slot(pol_slot, decomp_int_x[j], enc_base);\n\t\t\t expected_result[num_to_sort-1-i][k * m_expansionLen + j] = pol_slot;\n\t\t\t}\n\t\t}\n\n\t\t/*\n\t\tcout << num_to_sort-1-i << endl;\n\t\tfor(int j = 0; j < nslots; j++)\n\t\t{\n\t\t\tprintZZX(cout, expected_result[num_to_sort-1-i][j], ord_p);\n \tcout << endl;\n\t\t}\n\t\t*/\t\n }\n\n // comparison function\n cout << \"Start of sorting\" << endl;\n this->sort(ctxt_out, ctxt_in);\n\n printNamedTimer(cout, \"Extraction\");\n printNamedTimer(cout, \"ComparisonCircuitBivar\");\n printNamedTimer(cout, \"ComparisonCircuitUnivar\");\n printNamedTimer(cout, 
\"EqualityCircuit\");\n printNamedTimer(cout, \"ShiftMul\");\n printNamedTimer(cout, \"ShiftAdd\");\n printNamedTimer(cout, \"Comparison\");\n printNamedTimer(cout, \"Sorting\");\n\n const FHEtimer* sort_timer = getTimerByName(\"Sorting\");\n\n cout << \"Avg. time per batch: \" << 1000.0 * sort_timer->getTime()/static_cast(run+1)/static_cast(numbers_size) << \" ms\" << endl;\n cout << \"Number of integers in one ciphertext \"<< numbers_size << endl;\n\n // remove the line below if it gives bizarre results \n ctxt_out[0].cleanUp();\n capacity = ctxt_out[0].bitCapacity();\n cout << \"Final capacity: \" << capacity << endl;\n if (capacity < min_capacity)\n min_capacity = capacity;\n cout << \"Min. capacity: \" << min_capacity << endl;\n cout << \"Final size: \" << ctxt_out[0].logOfPrimeSet()/log(2.0) << endl;\n \n for (int i = 0; i < num_to_sort; i++)\n {\n \tvector decrypted(nslots);\n\t ea.decrypt(ctxt_out[i], m_sk, decrypted);\n\n\t for(int j = 0; j < numbers_size; j++)\n\t { \n\t \tfor(int k = 0; k < m_expansionLen; k++)\n\t \t{\n\t \t\tif (decrypted[j * m_expansionLen + k] != expected_result[i][j * m_expansionLen + k])\n\t\t\t {\n\t\t\t \tprintf(\"Slot %ld: \", j * m_expansionLen + k);\n\t\t\t \tprintZZX(cout, decrypted[j * m_expansionLen + k], ord_p);\n\t\t\t cout << endl;\n\t\t\t cout << \"Failure\" << endl;\n\t\t\t return;\n\t\t\t }\n\t \t}\n\t }\n }\n }\n}\n\nvoid Comparator::test_compare(long runs) const\n{\n //reset timers\n setTimersOn();\n \n // initialize the random generator\n random_device rd;\n mt19937 eng(rd());\n uniform_int_distribution distr_u;\n uniform_int_distribution distr_i;\n\n // get EncryptedArray\n const EncryptedArray& ea = m_context.getEA();\n\n //extract number of slots\n long nslots = ea.size();\n\n //get p\n unsigned long p = m_context.getP();\n\n //order of p\n unsigned long ord_p = m_context.getOrdP();\n\n //amount of numbers in one ciphertext\n unsigned long numbers_size = nslots / m_expansionLen;\n\n // number of slots occupied 
by encoded numbers\n unsigned long occupied_slots = numbers_size * m_expansionLen;\n\n //encoding base, ((p+1)/2)^d\n //if 2-variable comparison polynomial is used, it must be p^d\n unsigned long enc_base = (p + 1) >> 1;\n if (m_type == BI || m_type == TAN)\n {\n \tenc_base = p;\n }\n\n unsigned long digit_base = power_long(enc_base, m_slotDeg);\n\n //check that field_size^expansion_len fits into 64-bits\n int space_bit_size = static_cast(ceil(m_expansionLen * log2(digit_base)));\n cout << \"Space bit size \" << space_bit_size << endl;\n unsigned long input_range = ULONG_MAX;\n if(space_bit_size < 64)\n {\n //input_range = power_long(field_size, expansion_len);\n input_range = power_long(digit_base, m_expansionLen);\n }\n cout << \"Maximal input: \" << input_range << endl;\n\n long min_capacity = 1000;\n long capacity;\n for (int run = 0; run < runs; run++)\n {\n printf(\"Run %d started\\n\", run);\n\n vector expected_result(occupied_slots);\n vector decrypted(occupied_slots);\n\n // Create the plaintext polynomials for the text and for the pattern\n vector pol_x(nslots);\n vector pol_y(nslots);\n \n unsigned long input_x;\n unsigned long input_y;\n ZZX pol_slot;\n\n for (int i = 0; i < numbers_size; i++)\n {\n input_x = distr_u(eng) % input_range;\n input_y = distr_u(eng) % input_range;\n\n if(m_verbose)\n {\n cout << \"Input \" << i << endl;\n cout << input_x << endl;\n cout << input_y << endl;\n }\n\n if (input_x < input_y)\n {\n expected_result[i * m_expansionLen] = ZZX(INIT_MONO, 0, 1);\n }\n else\n {\n expected_result[i * m_expansionLen] = ZZX(INIT_MONO, 0, 0);\n }\n\n vector decomp_int_x;\n vector decomp_int_y;\n vector decomp_char;\n\n //decomposition of input integers\n digit_decomp(decomp_int_x, input_x, digit_base, m_expansionLen);\n digit_decomp(decomp_int_y, input_y, digit_base, m_expansionLen);\n\n if(m_verbose)\n {\n \tcout << \"Input decomposition into digits\" << endl;\n \tfor(int j = 0; j < m_expansionLen; j++)\n \t{\n \t\tcout << decomp_int_x[j] 
<< \" \" << decomp_int_y[j] << endl;\n \t}\n }\n\n //encoding of slots\n for (int j = 0; j < m_expansionLen; j++)\n {\n //decomposition of a digit\n int_to_slot(pol_slot, decomp_int_x[j], enc_base);\n pol_x[i * m_expansionLen + j] = pol_slot;\n }\n\n for (int j = 0; j < m_expansionLen; j++)\n {\n //decomposition of a digit\n int_to_slot(pol_slot, decomp_int_y[j], enc_base);\n pol_y[i * m_expansionLen + j] = pol_slot;\n }\n }\n\n if(m_verbose)\n {\n cout << \"Input\" << endl;\n for(int i = 0; i < nslots; i++)\n {\n printZZX(cout, pol_x[i], ord_p);\n printZZX(cout, pol_y[i], ord_p);\n cout << endl;\n }\n }\n\n Ctxt ctxt_x(m_pk);\n Ctxt ctxt_y(m_pk);\n ea.encrypt(ctxt_x, m_pk, pol_x);\n ea.encrypt(ctxt_y, m_pk, pol_y);\n \n Ctxt ctxt_res(m_pk);\n\n // comparison function\n cout << \"Start of comparison\" << endl;\n compare(ctxt_res, ctxt_x, ctxt_y);\n\n if(m_verbose)\n {\n cout << \"Input\" << endl;\n for(int j = 0; j < nslots; j++)\n {\n printZZX(cout, pol_x[j], ord_p);\n printZZX(cout, pol_y[j], ord_p);\n cout << endl;\n }\n\n cout << \"Output\" << endl;\n print_decrypted(ctxt_res);\n cout << endl;\n }\n printNamedTimer(cout, \"Extraction\");\n printNamedTimer(cout, \"ComparisonCircuitBivar\");\n printNamedTimer(cout, \"ComparisonCircuitUnivar\");\n printNamedTimer(cout, \"EqualityCircuit\");\n printNamedTimer(cout, \"ShiftMul\");\n printNamedTimer(cout, \"ShiftAdd\");\n printNamedTimer(cout, \"Comparison\");\n\n const FHEtimer* comp_timer = getTimerByName(\"Comparison\");\n\n cout << \"Avg. time per integer: \" << 1000.0 * comp_timer->getTime()/static_cast(run+1)/static_cast(numbers_size) << \" ms\" << endl;\n cout << \"Number of integers in one ciphertext \"<< numbers_size << endl;\n\n // remove the line below if it gives bizarre results \n ctxt_res.cleanUp();\n capacity = ctxt_res.bitCapacity();\n cout << \"Final capacity: \" << capacity << endl;\n if (capacity < min_capacity)\n min_capacity = capacity;\n cout << \"Min. 
capacity: \" << min_capacity << endl;\n cout << \"Final size: \" << ctxt_res.logOfPrimeSet()/log(2.0) << endl;\n ea.decrypt(ctxt_res, m_sk, decrypted);\n\n for(int i = 0; i < numbers_size; i++)\n { \n if (decrypted[i * m_expansionLen] != expected_result[i * m_expansionLen])\n {\n printf(\"Slot %ld: \", i * m_expansionLen);\n printZZX(cout, decrypted[i * m_expansionLen], ord_p);\n cout << endl;\n cout << \"Failure\" << endl;\n return;\n }\n }\n cout << endl;\n }\n}\n\nvoid Comparator::test_min_max(long runs) const\n{\n\t//reset timers\n setTimersOn();\n \n // initialize the random generator\n random_device rd;\n mt19937 eng(rd());\n uniform_int_distribution distr_u;\n uniform_int_distribution distr_i;\n\n // get EncryptedArray\n const EncryptedArray& ea = m_context.getEA();\n\n //extract number of slots\n long nslots = ea.size();\n\n //get p\n unsigned long p = m_context.getP();\n\n //order of p\n unsigned long ord_p = m_context.getOrdP();\n\n //amount of numbers in one ciphertext\n unsigned long numbers_size = nslots / m_expansionLen;\n\n // number of slots occupied by encoded numbers\n unsigned long occupied_slots = numbers_size * m_expansionLen;\n\n //encoding base, ((p+1)/2)^d\n //if 2-variable comparison polynomial is used, it must be p^d\n unsigned long enc_base = (p + 1) >> 1;\n if (m_type == BI || m_type == TAN)\n {\n \tenc_base = p;\n }\n\n unsigned long digit_base = power_long(enc_base, m_slotDeg);\n\n //check that field_size^expansion_len fits into 64-bits\n int space_bit_size = static_cast(ceil(m_expansionLen * log2(digit_base)));\n unsigned long input_range = ULONG_MAX;\n if(space_bit_size < 64)\n {\n //input_range = power_long(field_size, expansion_len);\n input_range = power_long(digit_base, m_expansionLen);\n }\n cout << \"Maximal input: \" << input_range << endl;\n\n long min_capacity = 1000;\n long capacity;\n for (int run = 0; run < runs; run++)\n {\n printf(\"Run %d started\\n\", run);\n\n vector expected_result_min(occupied_slots);\n vector 
expected_result_max(occupied_slots);\n vector decrypted_min(occupied_slots);\n vector decrypted_max(occupied_slots);\n\n // Create the plaintext polynomials for the text and for the pattern\n vector pol_x(nslots);\n vector pol_y(nslots);\n \n unsigned long input_x;\n unsigned long input_y;\n ZZX pol_slot;\n\n for (int i = 0; i < numbers_size; i++)\n {\n input_x = distr_u(eng) % input_range;\n input_y = distr_u(eng) % input_range;\n\n if(m_verbose)\n {\n cout << \"Input\" << endl;\n cout << input_x << endl;\n cout << input_y << endl;\n }\n\n vector decomp_int_x;\n vector decomp_int_y;\n vector decomp_char;\n\n //decomposition of input integers\n digit_decomp(decomp_int_x, input_x, digit_base, m_expansionLen);\n digit_decomp(decomp_int_y, input_y, digit_base, m_expansionLen);\n\n //encoding of slots\n for (int j = 0; j < m_expansionLen; j++)\n {\n //decomposition of a digit\n int_to_slot(pol_slot, decomp_int_x[j], enc_base);\n pol_x[i * m_expansionLen + j] = pol_slot;\n }\n\n for (int j = 0; j < m_expansionLen; j++)\n {\n //decomposition of a digit\n int_to_slot(pol_slot, decomp_int_y[j], enc_base);\n pol_y[i * m_expansionLen + j] = pol_slot;\n }\n\n if (input_x < input_y)\n {\n \tfor (int j = 0; j < m_expansionLen; j++)\n \t{\n\t expected_result_min[i * m_expansionLen + j] = pol_x[i * m_expansionLen + j];\n\t expected_result_max[i * m_expansionLen + j] = pol_y[i * m_expansionLen + j];\n\t }\n }\n else\n {\n for (int j = 0; j < m_expansionLen; j++)\n \t{\n\t expected_result_min[i * m_expansionLen + j] = pol_y[i * m_expansionLen + j];\n\t expected_result_max[i * m_expansionLen + j] = pol_x[i * m_expansionLen + j];\n\t }\n }\n }\n\n if(m_verbose)\n {\n cout << \"Input\" << endl;\n for(int i = 0; i < nslots; i++)\n {\n printZZX(cout, pol_x[i], ord_p);\n printZZX(cout, pol_y[i], ord_p);\n cout << endl;\n }\n }\n\n Ctxt ctxt_x(m_pk);\n Ctxt ctxt_y(m_pk);\n ea.encrypt(ctxt_x, m_pk, pol_x);\n ea.encrypt(ctxt_y, m_pk, pol_y);\n \n Ctxt ctxt_min(m_pk);\n Ctxt 
ctxt_max(m_pk);\n\n // comparison function\n cout << \"Start of Min/Max\" << endl;\n min_max(ctxt_min, ctxt_max, ctxt_x, ctxt_y);\n\n if(m_verbose)\n {\n cout << \"Input\" << endl;\n for(int i = 0; i < nslots; i++)\n {\n printZZX(cout, pol_x[i], ord_p);\n printZZX(cout, pol_y[i], ord_p);\n cout << endl;\n }\n\n cout << \"Output min\" << endl;\n print_decrypted(ctxt_min);\n cout << endl;\n\n cout << \"Output max\" << endl;\n print_decrypted(ctxt_max);\n cout << endl;\n }\n printNamedTimer(cout, \"Extraction\");\n printNamedTimer(cout, \"MinMax\");\n\n const FHEtimer* min_max_timer = getTimerByName(\"MinMax\");\n\n cout << \"Avg. time per integer: \" << 1000.0 * min_max_timer->getTime()/static_cast(run+1)/static_cast(numbers_size) << \" ms\" << endl;\n cout << \"Number of integers in one ciphertext \"<< numbers_size << endl;\n\n // remove the line below if it gives bizarre results \n ctxt_min.cleanUp();\n capacity = ctxt_min.bitCapacity();\n ctxt_max.cleanUp();\n cout << \"Final capacity: \" << capacity << endl;\n if (capacity < min_capacity)\n min_capacity = capacity;\n cout << \"Min. 
capacity: \" << min_capacity << endl;\n cout << \"Final size: \" << ctxt_min.logOfPrimeSet()/log(2.0) << endl;\n ea.decrypt(ctxt_min, m_sk, decrypted_min);\n ea.decrypt(ctxt_max, m_sk, decrypted_max);\n\n for(int i = 0; i < numbers_size; i++)\n { \n if (decrypted_min[i * m_expansionLen] != expected_result_min[i * m_expansionLen])\n {\n printf(\"Slot %ld: \", i * m_expansionLen);\n printZZX(cout, decrypted_min[i * m_expansionLen], ord_p);\n cout << endl;\n cout << \"Failure\" << endl;\n return;\n }\n }\n cout << endl;\n for(int i = 0; i < numbers_size; i++)\n { \n if (decrypted_max[i * m_expansionLen] != expected_result_max[i * m_expansionLen])\n {\n printf(\"Slot %ld: \", i * m_expansionLen);\n printZZX(cout, decrypted_max[i * m_expansionLen], ord_p);\n cout << endl;\n cout << \"Failure\" << endl;\n return;\n }\n }\n cout << endl;\n }\n}\n\nvoid Comparator::test_array_min(int input_len, long depth, long runs) const\n{\n\t//reset timers\n setTimersOn();\n \n // initialize the random generator\n random_device rd;\n mt19937 eng(rd());\n uniform_int_distribution distr_u;\n uniform_int_distribution distr_i;\n\n // get EncryptedArray\n const EncryptedArray& ea = m_context.getEA();\n\n //extract number of slots\n long nslots = ea.size();\n\n //get p\n unsigned long p = m_context.getP();\n\n //order of p\n unsigned long ord_p = m_context.getOrdP();\n\n //amount of numbers in one ciphertext\n unsigned long numbers_size = nslots / m_expansionLen;\n\n // number of slots occupied by encoded numbers\n unsigned long occupied_slots = numbers_size * m_expansionLen;\n\n //encoding base, ((p+1)/2)^d\n //if 2-variable comparison polynomial is used, it must be p^d\n unsigned long enc_base = (p + 1) >> 1;\n if (m_type == BI || m_type == TAN)\n {\n \tenc_base = p;\n }\n\n unsigned long digit_base = power_long(enc_base, m_slotDeg);\n\n //check that field_size^expansion_len fits into 64-bits\n int space_bit_size = static_cast(ceil(m_expansionLen * log2(digit_base)));\n unsigned long 
input_range = ULONG_MAX;\n if(space_bit_size < 64)\n {\n //input_range = power_long(field_size, expansion_len);\n input_range = power_long(digit_base, m_expansionLen);\n }\n cout << \"Maximal input: \" << input_range << endl;\n\n long min_capacity = 1000;\n long capacity;\n\n for (int run = 0; run < runs; run++)\n {\n printf(\"Run %d started\\n\", run);\n\n vector expected_result(occupied_slots,ZZX(INIT_MONO,0,0));\n \n // vector of input longs\n vector> input_xs;\n for (int i = 0; i < numbers_size; i++)\n {\n \tvector tmp_vec(input_len,0);\n \tinput_xs.push_back(tmp_vec);\n }\n\n ZZX pol_slot;\n\n //ciphertexts to sort\n vector ctxt_in;\n\n //sorted ciphertexts\n Ctxt ctxt_out(m_pk);\n\n for (int i = 0; i < input_len; i++)\n {\n\t\t// the plaintext polynomials\n\t\tvector pol_x(nslots);\n\n\t\t//encoding of slots\n\t\tfor (int k = 0; k < numbers_size; k++)\n\t\t{\n\t\t\tunsigned long input_x = distr_u(eng) % input_range;\n\n\t\t\tinput_xs[k][i] = input_x;\n\t\t\n\t\t\tif(m_verbose)\n\t\t\t{\n\t\t\t\tcout << \"Input\" << endl;\n\t\t\t\tcout << input_x << endl;\n\t\t\t}\n\n\t\t\tvector decomp_int_x;\n\n\t\t\t//decomposition of input integers\n\t\t\tdigit_decomp(decomp_int_x, input_x, digit_base, m_expansionLen);\n\t\t\tfor (int j = 0; j < m_expansionLen; j++)\n\t\t\t{\n\t\t\t //decomposition of a digit\n\t\t\t int_to_slot(pol_slot, decomp_int_x[j], enc_base);\n\t\t\t pol_x[k * m_expansionLen + j] = pol_slot;\n\t\t\t}\n\t\t}\n\n \tif(m_verbose)\n\t {\n\t cout << \"Input\" << endl;\n\t for(int j = 0; j < nslots; j++)\n\t {\n\t printZZX(cout, pol_x[j], ord_p);\n\t cout << endl;\n\t }\n\t }\n\n\t Ctxt ctxt_x(m_pk);\n \tea.encrypt(ctxt_x, m_pk, pol_x);\n\n \tctxt_in.push_back(ctxt_x);\n }\n\n //cout << \"Input\" << endl;\n vector output_xs(numbers_size, 0);\n for (int i = 0; i < numbers_size; i++)\n {\n \t/*\n \tfor (int j = 0; j < input_len; j++)\n \t\tcout << input_xs[i][j] << \" \";\n \tcout << endl;\n \t*/\n \toutput_xs[i] = *std::min_element(input_xs[i].begin(), 
input_xs[i].end());\n \t//cout << \"Output: \" << output_xs[i] << endl;\n }\n\n //cout << \"Expected results\" << endl;\n\tfor (int k = 0; k < numbers_size; k++)\n\t{\n\t\tvector decomp_int_x;\n\n\t\t//decomposition of input integers\n\t\tdigit_decomp(decomp_int_x, output_xs[k], digit_base, m_expansionLen);\n\t\tfor (int j = 0; j < m_expansionLen; j++)\n\t\t{\n\t\t //decomposition of a digit\n\t\t int_to_slot(pol_slot, decomp_int_x[j], enc_base);\n\t\t expected_result[k * m_expansionLen + j] = pol_slot;\n\t\t}\n\t}\n\n\t/*\n\tcout << input_len-1-i << endl;\n\tfor(int j = 0; j < nslots; j++)\n\t{\n\t\tprintZZX(cout, expected_result[input_len-1-i][j], ord_p);\n \tcout << endl;\n\t}\n\t*/\n\n // comparison function\n cout << \"Start of array minimum\" << endl;\n this->array_min(ctxt_out, ctxt_in, depth);\n\n printNamedTimer(cout, \"Extraction\");\n printNamedTimer(cout, \"ComparisonCircuitBivar\");\n printNamedTimer(cout, \"ComparisonCircuitUnivar\");\n printNamedTimer(cout, \"EqualityCircuit\");\n printNamedTimer(cout, \"ShiftMul\");\n printNamedTimer(cout, \"ShiftAdd\");\n printNamedTimer(cout, \"Comparison\");\n printNamedTimer(cout, \"ArrayMin\");\n\n const FHEtimer* sort_timer = getTimerByName(\"ArrayMin\");\n\n cout << \"Avg. time per batch: \" << 1000.0 * sort_timer->getTime()/static_cast(run+1)/static_cast(numbers_size) << \" ms\" << endl;\n cout << \"Number of integers in one ciphertext \"<< numbers_size << endl;\n\n // remove the line below if it gives bizarre results \n ctxt_out.cleanUp();\n capacity = ctxt_out.bitCapacity();\n cout << \"Final capacity: \" << capacity << endl;\n if (capacity < min_capacity)\n min_capacity = capacity;\n cout << \"Min. 
capacity: \" << min_capacity << endl;\n cout << \"Final size: \" << ctxt_out.logOfPrimeSet()/log(2.0) << endl;\n \n\tvector decrypted(nslots);\n ea.decrypt(ctxt_out, m_sk, decrypted);\n\n for(int j = 0; j < numbers_size; j++)\n { \n \tfor(int k = 0; k < m_expansionLen; k++)\n \t{\n \t\tif (decrypted[j * m_expansionLen + k] != expected_result[j * m_expansionLen + k])\n\t\t {\n\t\t \tprintf(\"Slot %ld: \", j * m_expansionLen + k);\n\t\t \tprintZZX(cout, decrypted[j * m_expansionLen + k], ord_p);\n\t\t cout << endl;\n\t\t cout << \"Failure\" << endl;\n\t\t return;\n\t\t }\n \t}\n }\n }\n}\n", "meta": {"hexsha": "7ca5c77559d7f65d9799d633f146e5419e1f6359", "size": 73359, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "code/comparator.cpp", "max_stars_repo_name": "iliailia/comparison-circuit-over-fq", "max_stars_repo_head_hexsha": "bc48a9101278997f0847b6ace59c8f3b83884dc0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2021-03-24T07:58:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T00:41:08.000Z", "max_issues_repo_path": "code/comparator.cpp", "max_issues_repo_name": "iliailia/comparison-circuit-over-fq", "max_issues_repo_head_hexsha": "bc48a9101278997f0847b6ace59c8f3b83884dc0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2021-03-24T03:03:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-01T09:23:59.000Z", "max_forks_repo_path": "code/comparator.cpp", "max_forks_repo_name": "iliailia/comparison-circuit-over-fq", "max_forks_repo_head_hexsha": "bc48a9101278997f0847b6ace59c8f3b83884dc0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-01-19T16:28:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T16:28:37.000Z", "avg_line_length": 25.6410346033, "max_line_length": 362, "alphanum_fraction": 0.6169113538, "num_tokens": 24385, "lm_name": 
"Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8479677506936879, "lm_q2_score": 0.588889130767832, "lm_q1q2_score": 0.49935899162515957}} {"text": "// This file is part of the dune-gdt project:\n// https://github.com/dune-community/dune-gdt\n// Copyright 2010-2017 dune-gdt developers and contributors. All rights reserved.\n// License: Dual licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)\n// or GPL-2.0+ (http://opensource.org/licenses/gpl-license)\n// with \"runtime exception\" (http://www.dune-project.org/license.html)\n// Authors:\n// Tobias Leibner (2017)\n\n#ifndef DUNE_GDT_HYPERBOLIC_PROBLEMS_MOMENTMODELS_BASISFUNCTIONS_SPHERICALHARMONICS_HH\n#define DUNE_GDT_HYPERBOLIC_PROBLEMS_MOMENTMODELS_BASISFUNCTIONS_SPHERICALHARMONICS_HH\n\n#include \n#include \n\n#include \"base.hh\"\n\nnamespace Dune {\nnamespace GDT {\nnamespace Hyperbolic {\nnamespace Problems {\n\n\n// TODO: use complex arithmetic, currently only usable for Pn Models in 2D, test for only_positive = false\ntemplate \nclass SphericalHarmonics\n : public BasisfunctionsInterface\n{\npublic:\n static const size_t dimDomain = 3;\n static const size_t dimRange = only_positive ? ((order + 1) * (order + 2)) / 2 : (order + 1) * (order + 1);\n static const size_t dimFlux = fluxDim;\n\nprivate:\n typedef BasisfunctionsInterface BaseType;\n\npublic:\n using typename BaseType::DomainType;\n using typename BaseType::RangeType;\n using typename BaseType::MatrixType;\n template \n using VisualizerType = typename BaseType::template VisualizerType;\n\n virtual RangeType evaluate(const DomainType& v) const override\n {\n const auto v_spherical = XT::Common::CoordinateConverter::to_spherical(v);\n return evaluate_in_spherical_coords(v_spherical);\n } // ... 
evaluate(...)\n\n RangeType evaluate_in_spherical_coords(const FieldVector& coords) const\n {\n const DomainFieldType theta = coords[0];\n const DomainFieldType phi = coords[1];\n RangeType ret(0);\n // TODO: use complex arithmetic, remove real() call\n for (size_t ll = 0; ll <= order; ++ll)\n for (int mm = only_positive ? 0 : -int(ll); mm <= int(ll); ++mm)\n ret[helper::pos(ll, mm)] = boost::math::spherical_harmonic(ll, mm, theta, phi).real();\n return ret;\n } // ... evaluate(...)\n\n virtual RangeType integrated() const override\n {\n RangeType ret(0);\n ret[0] = std::sqrt(4. * M_PI);\n return ret;\n }\n\n virtual MatrixType mass_matrix() const override\n {\n MatrixType M(dimRange, dimRange, 0);\n for (size_t rr = 0; rr < dimRange; ++rr)\n M[rr][rr] = 1;\n return M;\n }\n\n virtual MatrixType mass_matrix_inverse() const override\n {\n return mass_matrix();\n }\n\n virtual FieldVector mass_matrix_with_v() const override\n {\n FieldVector ret(MatrixType(dimRange, dimRange, 0));\n ret[0] = create_Bx();\n ret[1] = create_Bz();\n // if (dimFlux == 3)\n // ret[2] = create_By();\n return ret;\n } // ... mass_matrix_with_v()\n\n template \n VisualizerType visualizer() const\n {\n return [](const DiscreteFunctionType& u_n, const std::string& filename_prefix, const size_t ii) {\n component_visualizer(u_n, filename_prefix, ii, std::sqrt(4 * M_PI));\n };\n }\n\n std::pair calculate_isotropic_distribution(const RangeType& u) const\n {\n RangeType u_iso(0), alpha_iso(0);\n u_iso[0] = u[0];\n alpha_iso[0] = std::log(u[0] / (4. * M_PI));\n return std::make_pair(u_iso, alpha_iso);\n }\n\nprivate:\n static RangeFieldType A_lm(const size_t l, const int m)\n {\n return std::sqrt((l + m) * (l - m) / ((2. * l + 1.) * (2. * l - 1.)));\n }\n\n static RangeFieldType B_lm(const size_t l, const int m)\n {\n return std::sqrt((l + m) * (l + m - 1.) / ((2. * l + 1.) * (2. 
* l - 1.)));\n }\n\n static MatrixType create_Bx()\n {\n MatrixType Bx(dimRange, dimRange, 0);\n const auto& pos = helper::pos;\n for (size_t l1 = 0; l1 <= order; ++l1) {\n for (int m1 = only_positive ? 0 : -int(l1); size_t(std::abs(m1)) <= l1; ++m1) {\n for (size_t l2 = 0; l2 <= order; ++l2) {\n for (int m2 = -int(l2); size_t(std::abs(m2)) <= l2; ++m2) {\n size_t row = pos(l1, m1);\n size_t col = pos(l2, only_positive ? std::abs(m2) : m2);\n RangeFieldType factor = !only_positive ? 1. : (m2 < 0 ? std::pow(-1., m2) : 1.);\n if (l1 == l2 + 1 && m1 == m2 + 1)\n Bx[row][col] += -0.5 * factor * B_lm(l2 + 1, m2 + 1);\n if (l1 == l2 - 1 && m1 == m2 + 1)\n Bx[row][col] += 0.5 * factor * B_lm(l2, -m2);\n if (l1 == l2 + 1 && m1 == m2 - 1)\n Bx[row][col] += 0.5 * factor * B_lm(l2 + 1, -m2 - 1);\n if (l1 == l2 - 1 && m1 == m2 - 1)\n Bx[row][col] += -0.5 * factor * B_lm(l2, m2);\n } // m2\n } // l2\n } // m1\n } // l1\n return Bx;\n } // ... create_Bx()\n\n // static MatrixType create_By()\n // {\n // MatrixType By(dimRange, dimRange, 0);\n // const auto& pos = helper::pos;\n // for (size_t l1 = 0; l1 <= order; ++l1) {\n // for (int m1 = only_positive ? 0 : -l1; size_t(std::abs(m1)) <= l1; ++m1) {\n // for (size_t l2 = 0; l2 <= order; ++l2) {\n // for (int m2 = -int(l2); size_t(std::abs(m2)) <= l2; ++m2) {\n // size_t row = pos(l1, m1);\n // size_t col = pos(l2, only_positive ? std::abs(m2) : m2);\n // RangeFieldType factor = !only_positive ? 1. : (m2 < 0 ? 
std::pow(-1., m2) : 1.);\n // if (l1 == l2 + 1 && m1 == m2 + 1)\n // By[row][col] += 0.5 * factor * std::complex(0, 1) * B_lm(l2 + 1, m2 + 1);\n // if (l1 == l2 - 1 && m1 == m2 + 1)\n // By[row][col] += -0.5 * factor * std::complex(0, 1) * B_lm(l2, -m2);\n // if (l1 == l2 + 1 && m1 == m2 - 1)\n // By[row][col] += 0.5 * factor * std::complex(0, 1) * B_lm(l2 + 1, -m2 - 1);\n // if (l1 == l2 - 1 && m1 == m2 - 1)\n // By[row][col] += -0.5 * factor * std::complex(0, 1) * B_lm(l2, m2);\n // } // m2\n // } // l2\n // } // m1\n // } // l1\n // return By;\n // } // ... create_By()\n\n static MatrixType create_Bz()\n {\n MatrixType Bz(dimRange, dimRange, 0);\n const auto& pos = helper::pos;\n for (size_t l1 = 0; l1 <= order; ++l1) {\n for (int m1 = only_positive ? 0. : -int(l1); size_t(std::abs(m1)) <= l1; ++m1) {\n for (size_t l2 = 0; l2 <= order; ++l2) {\n size_t row = pos(l1, m1);\n size_t col = pos(l2, m1); // m1 == m2, else matrix entry is 0\n if (l1 == l2 + 1)\n Bz[row][col] += A_lm(l2 + 1, m1);\n if (l1 == l2 - 1)\n Bz[row][col] += A_lm(l2, m1);\n } // l2\n } // m1\n } // l1\n return Bz;\n }\n\n template \n struct helper\n {\n // Converts a pair (l, m) to a vector index. The vector is ordered by l first, then by m.\n // Each l has 2l+1 values of m, so (l, m) has position\n // (\\sum_{k=0}^{l-1} (2k+1)) + (m+l) = l^2 + m + l\n static size_t pos(const size_t l, const int m)\n {\n return size_t(l * l + m + l);\n }\n };\n\n template \n struct helper\n {\n // Converts a pair (l, m) to a vector index. 
The vector is ordered by l first, then by m.\n // Each l has l+1 non-negative values of m, so (l, m) has position\n // (\\sum_{k=0}^{l-1} (l+1)) + m = l(l+1)/2 + m\n static size_t pos(const size_t l, const int m)\n {\n return l * (l + 1) / 2 + m;\n }\n };\n}; // class SphericalHarmonics\n\n\ntemplate \nclass RealSphericalHarmonics\n : public BasisfunctionsInterface\n{\npublic:\n static const size_t dimDomain = 3;\n static const size_t dimFlux = fluxDim;\n static const size_t dimRange = only_even ? ((order + 1) * (order + 2)) / 2 : (order + 1) * (order + 1);\n\nprivate:\n typedef BasisfunctionsInterface BaseType;\n\npublic:\n typedef typename Dune::QuadratureRule QuadratureType;\n using typename BaseType::DomainType;\n using typename BaseType::RangeType;\n using typename BaseType::MatrixType;\n template \n using VisualizerType = typename BaseType::template VisualizerType;\n\n virtual RangeType evaluate(const DomainType& v) const override\n {\n const auto v_spherical = XT::Common::CoordinateConverter::to_spherical(v);\n return evaluate_in_spherical_coords(v_spherical);\n } // ... evaluate(...)\n\n RangeType evaluate_in_spherical_coords(const FieldVector& coords) const\n {\n const DomainFieldType theta = coords[0];\n const DomainFieldType phi = coords[1];\n RangeType ret(0);\n for (size_t ll = 0; ll <= order; ++ll)\n for (int mm = -int(ll); mm <= int(ll); ++mm)\n if (!only_even || !((mm + ll) % 2))\n ret[helper::pos(ll, mm)] = evaluate_lm(theta, phi, int(ll), mm);\n return ret;\n } // ... evaluate(...)\n\n virtual RangeType integrated() const override\n {\n RangeType ret(0);\n ret[0] = std::sqrt(4. 
* M_PI);\n return ret;\n }\n\n virtual MatrixType mass_matrix() const override\n {\n MatrixType M(dimRange, dimRange, 0);\n for (size_t rr = 0; rr < dimRange; ++rr)\n M[rr][rr] = 1;\n return M;\n }\n\n virtual MatrixType mass_matrix_inverse() const override\n {\n return mass_matrix();\n }\n\n virtual FieldVector mass_matrix_with_v() const override\n {\n FieldVector ret(MatrixType(dimRange, dimRange, 0));\n ret[0] = create_Bx();\n ret[1] = create_By();\n if (dimFlux == 3)\n ret[2] = create_Bz();\n return ret;\n } // ... mass_matrix_with_v()\n\n std::pair calculate_isotropic_distribution(const RangeType& u) const\n {\n RangeType u_iso(0), alpha_iso(0);\n u_iso[0] = u[0];\n alpha_iso[0] = std::log(u[0] / std::sqrt(4. * M_PI)) * std::sqrt(4. * M_PI);\n return std::make_pair(u_iso, alpha_iso);\n }\n\n template \n VisualizerType visualizer() const\n {\n return [](const DiscreteFunctionType& u_n, const std::string& filename_prefix, const size_t ii) {\n component_visualizer(u_n, filename_prefix, ii, std::sqrt(4 * M_PI));\n };\n }\n\n RangeFieldType realizability_limiter_max(const RangeType& u, const RangeType& u_bar) const\n {\n return 2 * std::max(u[0], u_bar[0]);\n }\n\nprivate:\n static RangeFieldType A_lm(const size_t l, const int m)\n {\n return std::sqrt((l + m) * (l - m) / ((2. * l + 1.) * (2. * l - 1.)));\n }\n\n static RangeFieldType B_lm(const size_t l, const int m)\n {\n return std::sqrt((l + m) * (l + m - 1.) / ((2. * l + 1.) * (2. * l - 1.)));\n }\n\n static MatrixType create_Bx()\n {\n MatrixType Bx(dimRange, dimRange, 0.);\n const auto& pos = helper::pos;\n for (size_t l1 = 0; l1 <= order; ++l1) {\n for (int m1 = -int(l1); size_t(std::abs(m1)) <= l1; ++m1) {\n for (size_t l2 = 0; l2 <= order; ++l2) {\n for (int m2 = -int(l2); size_t(std::abs(m2)) <= l2; ++m2) {\n if (!only_even || (!((m1 + l1) % 2) && !((m2 + l2) % 2))) {\n if (l1 == l2 - 1 && m1 == m2 - 1 && m2 > 0)\n Bx[pos(l1, m1)][pos(l2, m2)] += 0.5 * std::sqrt(1. 
+ (m2 == 1)) * B_lm(l2, m2);\n if (l1 == l2 + 1 && m1 == m2 - 1 && m2 > 0)\n Bx[pos(l1, m1)][pos(l2, m2)] += -0.5 * std::sqrt(1. + (m2 == 1)) * B_lm(l2 + 1, -m2 + 1);\n if (l1 == l2 - 1 && m1 == m2 + 1 && m2 > 0)\n Bx[pos(l1, m1)][pos(l2, m2)] += -0.5 * B_lm(l2, -m2);\n if (l1 == l2 + 1 && m1 == m2 + 1 && m2 > 0)\n Bx[pos(l1, m1)][pos(l2, m2)] += 0.5 * B_lm(l2 + 1, m2 + 1);\n if (l1 == l2 - 1 && m1 == m2 + 1 && m2 < 0)\n Bx[pos(l1, m1)][pos(l2, m2)] += 0.5 * (1. - (-m2 == 1)) * B_lm(l2, -m2);\n if (l1 == l2 + 1 && m1 == m2 + 1 && m2 < 0)\n Bx[pos(l1, m1)][pos(l2, m2)] += -0.5 * (1. - (-m2 == 1)) * B_lm(l2 + 1, m2 + 1);\n if (l1 == l2 - 1 && m1 == m2 - 1 && m2 < 0)\n Bx[pos(l1, m1)][pos(l2, m2)] += -0.5 * B_lm(l2, m2);\n if (l1 == l2 + 1 && m1 == m2 - 1 && m2 < 0)\n Bx[pos(l1, m1)][pos(l2, m2)] += 0.5 * B_lm(l2 + 1, -m2 + 1);\n if (l1 == l2 - 1 && m1 == 1 && m2 == 0)\n Bx[pos(l1, m1)][pos(l2, m2)] += -1. / std::sqrt(2.) * B_lm(l2, 0);\n if (l1 == l2 + 1 && m1 == 1 && m2 == 0)\n Bx[pos(l1, m1)][pos(l2, m2)] += 1. / std::sqrt(2.) * B_lm(l2 + 1, 1);\n }\n } // m2\n } // l2\n } // m1\n } // l1\n return Bx;\n }\n\n static MatrixType create_By()\n {\n MatrixType By(dimRange, dimRange, 0.);\n const auto& pos = helper::pos;\n for (size_t l1 = 0; l1 <= order; ++l1) {\n for (int m1 = -int(l1); size_t(std::abs(m1)) <= l1; ++m1) {\n for (size_t l2 = 0; l2 <= order; ++l2) {\n for (int m2 = -int(l2); size_t(std::abs(m2)) <= l2; ++m2) {\n if (!only_even || (!((m1 + l1) % 2) && !((m2 + l2) % 2))) {\n if (l1 == l2 + 1 && m1 == -m2 + 1 && m2 > 0)\n By[pos(l1, m1)][pos(l2, m2)] += 0.5 * (1. - (m2 == 1)) * B_lm(l2 + 1, -m2 + 1);\n if (l1 == l2 - 1 && m1 == -m2 + 1 && m2 > 0)\n By[pos(l1, m1)][pos(l2, m2)] += -0.5 * (1. 
- (m2 == 1)) * B_lm(l2, m2);\n if (l1 == l2 - 1 && m1 == -m2 - 1 && m2 > 0)\n By[pos(l1, m1)][pos(l2, m2)] += -0.5 * B_lm(l2, -m2);\n if (l1 == l2 + 1 && m1 == -m2 - 1 && m2 > 0)\n By[pos(l1, m1)][pos(l2, m2)] += 0.5 * B_lm(l2 + 1, m2 + 1);\n if (l1 == l2 - 1 && m1 == -m2 - 1 && m2 < 0)\n By[pos(l1, m1)][pos(l2, m2)] += 0.5 * std::sqrt(1. + (-m2 == 1)) * B_lm(l2, -m2);\n if (l1 == l2 + 1 && m1 == -m2 - 1 && m2 < 0)\n By[pos(l1, m1)][pos(l2, m2)] += -0.5 * std::sqrt(1. + (-m2 == 1)) * B_lm(l2 + 1, m2 + 1);\n if (l1 == l2 - 1 && m1 == -m2 + 1 && m2 < 0)\n By[pos(l1, m1)][pos(l2, m2)] += 0.5 * B_lm(l2, m2);\n if (l1 == l2 + 1 && m1 == -m2 + 1 && m2 < 0)\n By[pos(l1, m1)][pos(l2, m2)] += -0.5 * B_lm(l2 + 1, -m2 + 1);\n if (l1 == l2 - 1 && m1 == -1 && m2 == 0)\n By[pos(l1, m1)][pos(l2, m2)] += -1. / std::sqrt(2.) * B_lm(l2, 0);\n if (l1 == l2 + 1 && m1 == -1 && m2 == 0)\n By[pos(l1, m1)][pos(l2, m2)] += 1. / std::sqrt(2.) * B_lm(l2 + 1, 1);\n }\n } // m2\n } // l2\n } // m1\n } // l1\n return By;\n } // ... create_By()\n\n static MatrixType create_Bz()\n {\n MatrixType Bz(dimRange, dimRange, 0);\n const auto& pos = helper::pos;\n for (size_t l1 = 0; l1 <= order; ++l1) {\n for (int m1 = -int(l1); size_t(std::abs(m1)) <= l1; ++m1) {\n for (size_t l2 = 0; l2 <= order; ++l2) {\n for (int m2 = -int(l2); size_t(std::abs(m2)) <= l2; ++m2) {\n if (!only_even || (!((m1 + l1) % 2) && !((m2 + l2) % 2))) {\n if (m1 == m2 && l1 == l2 + 1)\n Bz[pos(l1, m1)][pos(l2, m2)] += A_lm(l2 + 1, m2);\n if (m1 == m2 && l1 == l2 - 1)\n Bz[pos(l1, m1)][pos(l2, m2)] += A_lm(l2, m2);\n }\n } // m2\n } // l2\n } // m1\n } // l1\n return Bz;\n } // ... create_Bz()\n\n template \n struct helper\n {\n // Converts a pair (l, m) to a vector index. 
The vector is ordered by l first, then by m.\n // Each l has 2l+1 values of m, so (l, m) has position\n // (\\sum_{k=0}^{l-1} (2k+1)) + (m+l) = l^2 + m + l\n static size_t pos(const size_t l, const int m)\n {\n return size_t(l * l + m + l);\n }\n };\n\n template \n struct helper\n {\n // Converts a pair (l, m) to a vector index. The vector is ordered by l first, then by m.\n // Each l has l+1 values of m (as only m s.t. m+l is even are considered), so (l, m) has position\n // (\\sum_{k=0}^{l-1} (k+1)) + (m+l)/2 = l(l+1)/2 + (l+m)/2\n static size_t pos(const int l, const int m)\n {\n return size_t(l * (l + 1) / 2 + (m + l) / 2);\n }\n };\n\n // Notation from Garrett, Hauck, \"A Comparison of Moment Closures for Linear Kinetic Transport Equations: The Line\n // Source Benchmark\",\n // http://www.tandfonline.com/doi/full/10.1080/00411450.2014.910226?src=recsys&, Section 4.1\n RangeFieldType N_lm(const int l, const int m) const\n {\n assert(l >= 0 && m >= 0 && m <= l);\n return std::sqrt((2. * l + 1.) * XT::Common::factorial(l - m) / (XT::Common::factorial(l + m) * 4. 
* M_PI));\n }\n\n RangeFieldType evaluate_lm(const DomainFieldType theta, const DomainFieldType phi, const int l, const int m) const\n {\n const auto cos_theta = std::cos(theta);\n assert(l >= 0 && std::abs(m) <= l);\n if (m < 0)\n return std::sqrt(2) * N_lm(l, -m) * boost::math::legendre_p(l, -m, cos_theta) * std::sin(-m * phi);\n else if (m == 0)\n return N_lm(l, 0) * boost::math::legendre_p(l, 0, cos_theta);\n else\n return std::sqrt(2) * N_lm(l, m) * boost::math::legendre_p(l, m, cos_theta) * std::cos(m * phi);\n }\n}; // class RealSphericalHarmonics\n\n\n} // namespace Problems\n} // namespace Hyperbolic\n} // namespace GDT\n} // namespace Dune\n\n#endif // DUNE_GDT_HYPERBOLIC_PROBLEMS_MOMENTMODELS_BASISFUNCTIONS_SPHERICALHARMONICS_HH\n", "meta": {"hexsha": "423b6b2908466228a379a6e42e3ef8ef1ea8d17d", "size": 18704, "ext": "hh", "lang": "C++", "max_stars_repo_path": "dune/gdt/test/hyperbolic/problems/momentmodels/basisfunctions/spherical_harmonics.hh", "max_stars_repo_name": "tobiasleibner/dune-gdt", "max_stars_repo_head_hexsha": "5d3dc6c7f5fd66db78ebb294d7ee4803f8e0bf5b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dune/gdt/test/hyperbolic/problems/momentmodels/basisfunctions/spherical_harmonics.hh", "max_issues_repo_name": "tobiasleibner/dune-gdt", "max_issues_repo_head_hexsha": "5d3dc6c7f5fd66db78ebb294d7ee4803f8e0bf5b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dune/gdt/test/hyperbolic/problems/momentmodels/basisfunctions/spherical_harmonics.hh", "max_forks_repo_name": "tobiasleibner/dune-gdt", "max_forks_repo_head_hexsha": "5d3dc6c7f5fd66db78ebb294d7ee4803f8e0bf5b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, 
"max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4599156118, "max_line_length": 116, "alphanum_fraction": 0.5324529512, "num_tokens": 6476, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8479677506936878, "lm_q2_score": 0.588889130767832, "lm_q1q2_score": 0.49935899162515945}} {"text": "#include \"OGR.h\"\n#include \n#include \n#include \n\n// ----------------------------------------------------------------------\n/*!\n * \\brief Calculate area in m^2 for a geography\n *\n * Refs: Some algorithms for polygons on a sphere\n * http://hdl.handle.net/2014/40409\n *\n * https://trac.osgeo.org/openlayers/browser/trunk/openlayers/lib/OpenLayers/Geometry/LinearRing.js\n *\n * Note: The algorithm returns a positive number for clockwise rings.\n */\n// ----------------------------------------------------------------------\n\ndouble geographic_area(const OGRLineString *theGeom)\n{\n try\n {\n double area = 0;\n\n std::size_t npoints = theGeom->getNumPoints();\n\n for (std::size_t i = 0; i < npoints - 1; i++)\n {\n double x1 = theGeom->getX(i) * boost::math::double_constants::degree;\n double y1 = theGeom->getY(i) * boost::math::double_constants::degree;\n double x2 = theGeom->getX(i + 1) * boost::math::double_constants::degree;\n double y2 = theGeom->getY(i + 1) * boost::math::double_constants::degree;\n\n area += (x2 - x1) * (2 + sin(y1) + sin(y2));\n }\n area *= 6378137.0 * 6378137.0 / 2.0;\n\n return std::abs(area);\n }\n catch (...)\n {\n throw Fmi::Exception::Trace(BCP, \"Operation failed!\");\n }\n}\n\ndouble geographic_area(const OGRLinearRing *theGeom)\n{\n try\n {\n return geographic_area(static_cast(theGeom));\n }\n catch (...)\n {\n throw Fmi::Exception::Trace(BCP, \"Operation failed!\");\n }\n}\n\n// ----------------------------------------------------------------------\n/*!\n * \\brief get_Area substitute for OGRLineString copied from OGRLinearRing::get_Area()\n *\n 
* The area is computed according to Green's Theorem:\n *\n * Area is \"Sum(x(i)*(y(i+1) - y(i-1)))/2\" for i = 0 to pointCount-1,\n * assuming the last point is a duplicate of the first.\n *\n */\n// ----------------------------------------------------------------------\n\ndouble metric_area(const OGRLineString *theGeom)\n{\n try\n {\n std::size_t npoints = theGeom->getNumPoints();\n if (npoints < 2)\n return 0;\n\n double area = theGeom->getX(0) * (theGeom->getY(1) - theGeom->getY(npoints - 1));\n\n for (std::size_t i = 1; i < npoints - 1; i++)\n area += theGeom->getX(i) * (theGeom->getY(i + 1) - theGeom->getY(i - 1));\n\n area += theGeom->getX(npoints - 1) * (theGeom->getY(0) - theGeom->getY(npoints - 2));\n\n return 0.5 * std::abs(area);\n }\n catch (...)\n {\n throw Fmi::Exception::Trace(BCP, \"Operation failed!\");\n }\n}\n\n// ----------------------------------------------------------------------\n/*!\n * \\brief Despeckle OGR geometry\n */\n// ----------------------------------------------------------------------\n\nOGRPolygon *despeckle_polygon(const OGRPolygon *theGeom, double theLimit, bool theGeogFlag)\n{\n try\n {\n // Quick exit if the exterior is too small\n\n const auto *exterior = theGeom->getExteriorRing();\n double area = (theGeogFlag ? geographic_area(exterior) : exterior->get_Area());\n\n if (area < theLimit)\n return nullptr;\n\n // We have at least a valid exterior\n\n auto *out = new OGRPolygon;\n out->addRingDirectly(dynamic_cast(exterior->clone()));\n\n // Remove too small holes too\n\n for (int i = 0, n = theGeom->getNumInteriorRings(); i < n; ++i)\n {\n const auto *hole = theGeom->getInteriorRing(i);\n area = (theGeogFlag ? 
geographic_area(hole) : hole->get_Area());\n if (area >= theLimit)\n out->addRingDirectly(dynamic_cast(hole->clone()));\n }\n\n return out;\n }\n catch (...)\n {\n throw Fmi::Exception::Trace(BCP, \"Operation failed!\");\n }\n}\n\n// ----------------------------------------------------------------------\n/*!\n * \\brief Despeckle OGR geometry\n */\n// ----------------------------------------------------------------------\n\nOGRLineString *despeckle_linestring(const OGRLineString *theGeom, double theLimit, bool theGeogFlag)\n{\n try\n {\n if (theGeom == nullptr || theGeom->IsEmpty() != 0)\n return nullptr;\n\n if (!theGeom->get_IsClosed())\n return dynamic_cast(theGeom->clone());\n\n // Despeckle closed linestrings only\n\n // TODO: Old GDAL does not have get_Area for linestrings\n // double area = (theGeogFlag ? geographic_area(theGeom) : geom->get_Area());\n double area = (theGeogFlag ? geographic_area(theGeom) : metric_area(theGeom));\n\n if (area < theLimit)\n return nullptr;\n\n return dynamic_cast(theGeom->clone());\n }\n catch (...)\n {\n throw Fmi::Exception::Trace(BCP, \"Operation failed!\");\n }\n}\n\n// ----------------------------------------------------------------------\n/*!\n * \\brief Despeckle OGR geometry\n */\n// ----------------------------------------------------------------------\n\nOGRPoint *despeckle_point(const OGRPoint *theGeom)\n{\n try\n {\n if (theGeom == nullptr || theGeom->IsEmpty() != 0)\n return nullptr;\n\n return dynamic_cast(theGeom->clone());\n }\n catch (...)\n {\n throw Fmi::Exception::Trace(BCP, \"Operation failed!\");\n }\n}\n\n// ----------------------------------------------------------------------\n/*!\n * \\brief Despeckle OGR geometry\n */\n// ----------------------------------------------------------------------\n\nOGRMultiPoint *despeckle_multipoint(const OGRMultiPoint *theGeom)\n{\n try\n {\n if (theGeom == nullptr || theGeom->IsEmpty() != 0)\n return nullptr;\n\n return dynamic_cast(theGeom->clone());\n }\n catch 
(...)\n {\n throw Fmi::Exception::Trace(BCP, \"Operation failed!\");\n }\n}\n\n// ----------------------------------------------------------------------\n/*!\n * \\brief Despeckle OGR geometry\n */\n// ----------------------------------------------------------------------\n\nOGRMultiLineString *despeckle_multilinestring(const OGRMultiLineString *theGeom,\n double theLimit,\n bool theGeogFlag)\n{\n try\n {\n if (theGeom == nullptr || theGeom->IsEmpty() != 0)\n return nullptr;\n ;\n\n auto *out = new OGRMultiLineString();\n\n for (int i = 0, n = theGeom->getNumGeometries(); i < n; ++i)\n {\n auto *geom = despeckle_linestring(\n dynamic_cast(theGeom->getGeometryRef(i)), theLimit, theGeogFlag);\n if (geom != nullptr)\n out->addGeometryDirectly(geom);\n }\n\n if (out->IsEmpty() == 0)\n return out;\n\n delete out;\n return nullptr;\n }\n catch (...)\n {\n throw Fmi::Exception::Trace(BCP, \"Operation failed!\");\n }\n}\n\n// ----------------------------------------------------------------------\n/*!\n * \\brief Despeckle OGR geometry\n */\n// ----------------------------------------------------------------------\n\nOGRMultiPolygon *despeckle_multipolygon(const OGRMultiPolygon *theGeom,\n double theLimit,\n bool theGeogFlag)\n{\n try\n {\n if (theGeom == nullptr || theGeom->IsEmpty() != 0)\n return nullptr;\n\n auto *out = new OGRMultiPolygon();\n\n for (int i = 0, n = theGeom->getNumGeometries(); i < n; ++i)\n {\n auto *geom = despeckle_polygon(\n dynamic_cast(theGeom->getGeometryRef(i)), theLimit, theGeogFlag);\n if (geom != nullptr)\n out->addGeometryDirectly(geom);\n }\n\n if (out->IsEmpty() == 0)\n return out;\n\n delete out;\n return nullptr;\n }\n catch (...)\n {\n throw Fmi::Exception::Trace(BCP, \"Operation failed!\");\n }\n}\n\n// ----------------------------------------------------------------------\n/*!\n * \\brief Despeckle OGR geometry\n */\n// ----------------------------------------------------------------------\n\n// Needed since two functions call each 
other\n\nOGRGeometry *despeckle_geom(const OGRGeometry *theGeom, double theLimit, bool theGeogFlag);\n\nOGRGeometryCollection *despeckle_geometrycollection(const OGRGeometryCollection *theGeom,\n double theLimit,\n bool theGeogFlag)\n{\n try\n {\n if (theGeom == nullptr || theGeom->IsEmpty() != 0)\n return nullptr;\n\n auto *out = new OGRGeometryCollection;\n\n for (int i = 0, n = theGeom->getNumGeometries(); i < n; ++i)\n {\n auto *geom = despeckle_geom(theGeom->getGeometryRef(i), theLimit, theGeogFlag);\n if (geom != nullptr)\n out->addGeometryDirectly(geom);\n }\n\n if (out->IsEmpty() == 0)\n return out;\n\n delete out;\n return nullptr;\n }\n catch (...)\n {\n throw Fmi::Exception::Trace(BCP, \"Operation failed!\");\n }\n}\n\n// ----------------------------------------------------------------------\n/*!\n * \\brief Despeckle OGR geometry to output geometry\n */\n// ----------------------------------------------------------------------\n\nOGRGeometry *despeckle_geom(const OGRGeometry *theGeom, double theLimit, bool theGeogFlag)\n{\n try\n {\n OGRwkbGeometryType id = theGeom->getGeometryType();\n\n switch (id)\n {\n case wkbPoint:\n return despeckle_point(dynamic_cast(theGeom));\n case wkbLineString:\n return despeckle_linestring(\n dynamic_cast(theGeom), theLimit, theGeogFlag);\n case wkbPolygon:\n return despeckle_polygon(dynamic_cast(theGeom), theLimit, theGeogFlag);\n case wkbMultiPoint:\n return despeckle_multipoint(dynamic_cast(theGeom));\n case wkbMultiLineString:\n return despeckle_multilinestring(\n dynamic_cast(theGeom), theLimit, theGeogFlag);\n case wkbMultiPolygon:\n return despeckle_multipolygon(\n dynamic_cast(theGeom), theLimit, theGeogFlag);\n case wkbGeometryCollection:\n return despeckle_geometrycollection(\n dynamic_cast(theGeom), theLimit, theGeogFlag);\n case wkbLinearRing:\n throw Fmi::Exception::Trace(BCP, \"Direct despeckling of LinearRings is not supported\");\n case wkbNone:\n throw Fmi::Exception::Trace(\n BCP, \"Encountered a 'none' 
geometry component when despeckling a geometry\");\n default:\n throw Fmi::Exception::Trace(\n BCP, \"Encountered an unknown geometry component when clipping polygons\");\n }\n\n // NOT REACHED\n return nullptr;\n }\n catch (...)\n {\n throw Fmi::Exception::Trace(BCP, \"Operation failed!\");\n }\n}\n\n// ----------------------------------------------------------------------\n/*!\n * \\brief Despeckle a geometry so that small polygons are removed\n *\n * \\return Empty GeometryCollection if the result is empty\n */\n// ----------------------------------------------------------------------\n\nOGRGeometry *Fmi::OGR::despeckle(const OGRGeometry &theGeom, double theAreaLimit)\n{\n try\n {\n // Area calculations for geographies must be done by ourselves, OGR\n // does it in the native system and hence would produce square degrees.\n\n OGRSpatialReference *crs = theGeom.getSpatialReference();\n bool geographic = (crs != nullptr ? (crs->IsGeographic() != 0) : false);\n\n // Actual despeckling\n\n auto *geom =\n despeckle_geom(&theGeom, theAreaLimit * 1000 * 1000, geographic); // from m^2 to km^2\n\n if (geom != nullptr)\n geom->assignSpatialReference(theGeom.getSpatialReference()); // SR is ref. 
counted\n\n return geom;\n }\n catch (...)\n {\n throw Fmi::Exception::Trace(BCP, \"Operation failed!\");\n }\n}\n", "meta": {"hexsha": "8bbe1bb27028c4203ce7775fbba4f1af1d06e9fa", "size": 11563, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "gis/OGR-despeckle.cpp", "max_stars_repo_name": "fmidev/smartmet-library-gis", "max_stars_repo_head_hexsha": "3fd5e7ede8f04e262d7de3f884fb575d98ae956d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gis/OGR-despeckle.cpp", "max_issues_repo_name": "fmidev/smartmet-library-gis", "max_issues_repo_head_hexsha": "3fd5e7ede8f04e262d7de3f884fb575d98ae956d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2017-03-01T10:15:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-11T10:53:26.000Z", "max_forks_repo_path": "gis/OGR-despeckle.cpp", "max_forks_repo_name": "fmidev/smartmet-library-gis", "max_forks_repo_head_hexsha": "3fd5e7ede8f04e262d7de3f884fb575d98ae956d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2017-09-16T15:14:06.000Z", "max_forks_repo_forks_event_max_datetime": "2017-09-16T15:14:06.000Z", "avg_line_length": 28.3406862745, "max_line_length": 105, "alphanum_fraction": 0.5396523394, "num_tokens": 2799, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8479677506936878, "lm_q2_score": 0.588889130767832, "lm_q1q2_score": 0.49935899162515945}} {"text": "/////////////////////////////////////////////////////////////////////////\n// Copyright (C) 2016 Sergey Koshelev \n/////////////////////////////////////////////////////////////////////////\n\n#define BOOST_TEST_DYN_LINK\n#define BOOST_TEST_MODULE \"HammingDistanceClass\"\n\n#include \n\n#include \"../lib/HammingDistance.h\"\n\nBOOST_AUTO_TEST_CASE( calculateBlobsEqSz )\n{\n // 2 bytes completely different\n unsigned int a = 0xFFFF;\n unsigned int b = 0x0000;\n BOOST_CHECK( HammingDistance::calculate( &a, &b, 1 ) == 16 );\n\n // move 1 set bit through integer and calculate Hamming distance. Must be always 1\n for ( int i = 0; i < sizeof( int ) * 8; ++i )\n {\n unsigned int a = 1 << i;\n unsigned int b = 0;\n BOOST_CHECK( HammingDistance::calculate( &a, &b, 1 ) == 1 );\n }\n\n // check array\n int aa[] = { 0xFFFF, 0x0000 };\n int bb[] = { 0xFFFE, 0x0010 };\n BOOST_CHECK( HammingDistance::calculate( aa, bb, 2 ) == 2 );\n}\n\nBOOST_AUTO_TEST_CASE( calculateBlobsDifSz )\n{\n int aa[] = { 0xFFFF, 0x0000 };\n int bb[] = { 0xFFFF };\n BOOST_CHECK( HammingDistance::calculate( aa, bb, 2, 1 ) == sizeof(int)*8 );\n}\n\nBOOST_AUTO_TEST_CASE( calculateBlobsZeroTerm )\n{\n int aa[] = { 0xFFFF, 0xFFFF, 0x1000, 0 };\n int bb[] = { 0xFFFF, 0xFEFF, 0x0100, 0 };\n BOOST_CHECK( HammingDistance::calculate( aa, bb ) == 3 );\n}\n\nBOOST_AUTO_TEST_CASE( calculateStrings )\n{\n std::string a = \"1011101\";\n std::string b = \"1001001\";\n BOOST_CHECK( HammingDistance::calculate( a, b ) == 2 );\n\n a = \"2173896\";\n b = \"2233796\";\n BOOST_CHECK( HammingDistance::calculate( a, b ) == 3 );\n\n a = \"2173896\";\n b = \"223379\";\n BOOST_CHECK( HammingDistance::calculate( a, b ) == 4 );\n\n}\n\n\n", "meta": {"hexsha": "af56506dab373a4282c52af41778e039be29e81a", "size": 1748, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/HDUnitTests.cpp", 
"max_stars_repo_name": "serge-koshelev/HammingDistanceLib", "max_stars_repo_head_hexsha": "b682783b1eee7efd3a7423cd0ccbf54700b528f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/HDUnitTests.cpp", "max_issues_repo_name": "serge-koshelev/HammingDistanceLib", "max_issues_repo_head_hexsha": "b682783b1eee7efd3a7423cd0ccbf54700b528f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/HDUnitTests.cpp", "max_forks_repo_name": "serge-koshelev/HammingDistanceLib", "max_forks_repo_head_hexsha": "b682783b1eee7efd3a7423cd0ccbf54700b528f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3125, "max_line_length": 85, "alphanum_fraction": 0.5652173913, "num_tokens": 524, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.727975460709318, "lm_q2_score": 0.6859494614282922, "lm_q1q2_score": 0.4993543752065695}} {"text": "#ifndef STAN_MATH_PRIM_MAT_FUN_DIAG_PRE_MULTIPLY_HPP\n#define STAN_MATH_PRIM_MAT_FUN_DIAG_PRE_MULTIPLY_HPP\n\n#include \n#include \n#include \n#include \n\nnamespace stan {\n namespace math {\n\n template \n Eigen::Matrix::type,\n R2, C2>\n diag_pre_multiply(const Eigen::Matrix& m1,\n const Eigen::Matrix& m2) {\n check_vector(\"diag_pre_multiply\", \"m1\", m1);\n int m2_rows = m2.rows();\n check_size_match(\"diag_pre_multiply\",\n \"m1.size()\", m1.size(),\n \"m2.rows()\", m2_rows);\n int m2_cols = m2.cols();\n Eigen::Matrix::type,\n R2, C2>\n result(m2_rows, m2_cols);\n for (int j = 0; j < m2_cols; ++j)\n for (int i = 0; i < m2_rows; ++i)\n result(i, j) = m1(i) * m2(i, j);\n return result;\n }\n\n }\n}\n#endif\n", "meta": {"hexsha": "152ea0388ce118e2b580affe1dee44a2c358ee41", "size": 1182, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "cmdstan/stan/lib/stan_math/stan/math/prim/mat/fun/diag_pre_multiply.hpp", "max_stars_repo_name": "yizhang-cae/torsten", "max_stars_repo_head_hexsha": "dc82080ca032325040844cbabe81c9a2b5e046f9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cmdstan/stan/lib/stan_math/stan/math/prim/mat/fun/diag_pre_multiply.hpp", "max_issues_repo_name": "yizhang-cae/torsten", "max_issues_repo_head_hexsha": "dc82080ca032325040844cbabe81c9a2b5e046f9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cmdstan/stan/lib/stan_math/stan/math/prim/mat/fun/diag_pre_multiply.hpp", "max_forks_repo_name": "yizhang-cae/torsten", "max_forks_repo_head_hexsha": "dc82080ca032325040844cbabe81c9a2b5e046f9", "max_forks_repo_licenses": ["BSD-3-Clause"], 
"max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7714285714, "max_line_length": 76, "alphanum_fraction": 0.60321489, "num_tokens": 347, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7279754489059774, "lm_q2_score": 0.6859494614282923, "lm_q1q2_score": 0.4993543671100745}} {"text": "//This file was modified slightly from\n// http://boost.2283326.n4.nabble.com/accumulators-histogram-td2639100.html\n//I just added the IsDensity flag\n#pragma once\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace boost { namespace accumulators\n{\n\n///////////////////////////////////////////////////////////////////////////////\n// cache_size and num_bins named parameters\n//\nBOOST_PARAMETER_NESTED_KEYWORD(tag, histogram_num_bins, num_bins)\nBOOST_PARAMETER_NESTED_KEYWORD(tag, histogram_min_range, min_range)\nBOOST_PARAMETER_NESTED_KEYWORD(tag, histogram_max_range, max_range)\n\nnamespace impl\n{\n ///////////////////////////////////////////////////////////////////////////////\n // histogram_impl\n // histogram histogram\n /**\n @brief Histogram histogram estimator\n\n The histogram histogram estimator returns a histogram of the sample distribution. The positions and sizes of the bins\n are determined using a specifiable number of cached samples (cache_size). The range between the minimum and the\n maximum of the cached samples is subdivided into a specifiable number of bins (num_bins) of same size. Additionally,\n an under- and an overflow bin is added to capture future under- and overflow samples. Once the bins are determined,\n the cached samples and all subsequent samples are added to the correct bins. 
At the end, a range of std::pair is\n return, where each pair contains the position of the bin (lower bound) and the samples count (normalized with the\n total number of samples).\n\n @param histogram_cache_size Number of first samples used to determine min and max.\n @param histogram_num_bins Number of bins (two additional bins collect under- and overflow samples).\n */\n template\n struct histogram_impl\n : accumulator_base\n {\n // typedef typename numeric::functional::average::result_type float_type;\n using float_type = double;\n typedef std::vector > histogram_type;\n typedef std::vector array_type;\n // for boost::result_of\n typedef iterator_range result_type;\n\n template\n histogram_impl(Args const& args) :\n\t\tnum_bins(args[histogram_num_bins]),\n\t\tminimum (args[histogram_min_range]),\n\t\tmaximum (args[histogram_max_range]),\n\t\tbin_size (numeric::average(args[histogram_max_range] - args[histogram_min_range], args[histogram_num_bins])),\n\t\tsamples_in_bin(args[histogram_num_bins] + 2, 0.),\n\t\tbin_positions(args[histogram_num_bins] + 2),\n\t\t_histogram(\n\t\t args[histogram_num_bins] + 2,\n\t\t std::make_pair(0,1)\n\t\t ),\n\t is_dirty(true)\n {\n\t\t\t// determine bin positions (their lower bounds)\n for (std::size_t i = 0; i < this->num_bins + 2; ++i)\n {\n this->bin_positions[i] = minimum + (i - 1.0) * bin_size;\n }\n }\n\n template\n void operator ()(Args const &args)\n {\n\t\t\t// std::size_t cnt = count(args);\n {\n if (args[sample] < this->bin_positions[1])\n {\n ++(this->samples_in_bin[0]);\n }\n else if (args[sample] >= this->bin_positions[this->num_bins + 1])\n {\n ++(this->samples_in_bin[this->num_bins + 1]);\n }\n else\n {\n typename array_type::iterator it = std::upper_bound(\n this->bin_positions.begin()\n , this->bin_positions.end()\n , args[sample]\n );\n\n std::size_t d = std::distance(this->bin_positions.begin(), it);\n ++(this->samples_in_bin[d - 1]);\n }\n }\n }\n\n template\n result_type result(Args const &args) const\n {\n 
{\n // creates a vector of std::pair where each pair i holds\n // the values bin_positions[i] (x-axis of histogram) and\n // samples_in_bin[i] / cnt (y-axis of histogram).\n\n for (std::size_t i = 0; i < this->num_bins + 2; ++i)\n {\n if(IsDensity)\n this->_histogram[i] = std::make_pair(this->bin_positions[i], numeric::average(this->samples_in_bin[i], count(args)));\n else\n this->_histogram[i] = std::make_pair(this->bin_positions[i], this->samples_in_bin[i]);\n }\n }\n // returns a range of pairs\n return make_iterator_range(this->_histogram);\n }\n\n private:\n std::size_t\t\t\t\tnum_bins; // number of bins\n\t\tfloat_type\t\t\t\tminimum;\n\t\tfloat_type\t\t\t\tmaximum;\n\t\tfloat_type\t\t\t\tbin_size;\n array_type\t\t\t\tsamples_in_bin; // number of samples in each bin\n array_type\t\t\t\tbin_positions; // lower bounds of bins\n mutable histogram_type\t_histogram; // histogram\n\t\tmutable\tbool\t\t\tis_dirty;\n };\n\n} // namespace impl\n\n///////////////////////////////////////////////////////////////////////////////\n// tag::histogram_density and tag::histogram_count\n//\nnamespace tag\n{\n template \n struct histogram_generic\n : depends_on\n\t , histogram_num_bins\n\t\t , histogram_min_range\n\t\t , histogram_max_range\n {\n /// INTERNAL ONLY\n ///\n\tstruct impl {\n\t template\n\t struct apply {\n typedef boost::accumulators::impl::histogram_impl type;\n\t };\n\t};\n };\n\n using histogram_density = histogram_generic;\n using histogram_count = histogram_generic;\n}\n\n///////////////////////////////////////////////////////////////////////////////\n// extract::histogram\n//\nnamespace extract\n{\n extractor const histogram_density = {};\n extractor const histogram_count = {};\n}\n\nusing extract::histogram_count;\nusing extract::histogram_density;\n\n// // So that histogram can be automatically substituted\n// // with weighted_histogram when the weight parameter is non-void.\n// template<>\n// struct as_weighted_feature\n// {\n// typedef 
tag::weighted_histogram type;\n// };\n\n// template<>\n// struct feature_of\n// : feature_of\n// {\n// };\n\n}} // namespace boost::accumulators\n\n\n", "meta": {"hexsha": "50aad2db4441493f19f40eb7a03ed3645d32d193", "size": 7131, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/stat_log/stats/accumulator_types/boost_hist.hpp", "max_stars_repo_name": "vladon/stat_log", "max_stars_repo_head_hexsha": "39cd364d6010dd6fd1d734d474961becccc04a09", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2016-08-08T18:12:59.000Z", "max_stars_repo_stars_event_max_datetime": "2016-08-08T18:12:59.000Z", "max_issues_repo_path": "include/stat_log/stats/accumulator_types/boost_hist.hpp", "max_issues_repo_name": "vladon/stat_log", "max_issues_repo_head_hexsha": "39cd364d6010dd6fd1d734d474961becccc04a09", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/stat_log/stats/accumulator_types/boost_hist.hpp", "max_forks_repo_name": "vladon/stat_log", "max_forks_repo_head_hexsha": "39cd364d6010dd6fd1d734d474961becccc04a09", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2017-07-05T07:50:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-13T08:59:29.000Z", "avg_line_length": 36.1979695431, "max_line_length": 140, "alphanum_fraction": 0.6146403029, "num_tokens": 1510, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7279754489059774, "lm_q2_score": 0.6859494485880927, "lm_q1q2_score": 0.49935435776272447}} {"text": "#include \"Integrator21GL.hh\"\n\n#include \n#include \n#include \n\n#include \"GSLSamplerGL.hh\"\n#include \"TypesFunctions.hh\"\n\nusing namespace Eigen;\nusing namespace std;\n\nIntegrator21GL::Integrator21GL(size_t xbins, int xorders, double* xedges, int yorder, double ymin, double ymax) :\nIntegrator21Base(xbins, xorders, xedges, yorder, ymin, ymax)\n{\n init_sampler();\n}\n\nIntegrator21GL::Integrator21GL(size_t xbins, int* xorders, double* xedges, int yorder, double ymin, double ymax) :\nIntegrator21Base(xbins, xorders, xedges, yorder, ymin, ymax)\n{\n init_sampler();\n}\n\nvoid Integrator21GL::sample(FunctionArgs& fargs){\n auto& rets=fargs.rets;\n GSLSamplerGL sampler;\n auto& x=rets[0];\n auto& y=rets[1];\n sampler.fill_bins(m_xorders.size(), m_xorders.data(), m_xedges.data(), x.buffer, m_xweights.data());\n sampler.fill(m_yorder, m_ymin, m_ymax, y.buffer, m_yweights.data());\n\n m_weights = m_xweights.matrix() * m_yweights.matrix().transpose();\n\n rets[2].x = m_xedges.cast();\n\n auto npoints=m_xedges.size()-1;\n rets[3].x = 0.5*(m_xedges.tail(npoints)+m_xedges.head(npoints));\n rets[4].mat = x.vec.replicate(1, m_yweights.size());\n rets[5].mat = y.vec.transpose().replicate(m_xweights.size(), 1);\n rets[6].x = 0.0;\n\n rets.untaint();\n rets.freeze();\n}\n\n", "meta": {"hexsha": "8482ab9fefdfe82e2d34ce622f10062ea1c2400c", "size": 1291, "ext": "cc", "lang": "C++", "max_stars_repo_path": "transformations/integrator/Integrator21GL.cc", "max_stars_repo_name": "gnafit/gna", "max_stars_repo_head_hexsha": "c1a58dac11783342c97a2da1b19c97b85bce0394", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2019-10-14T01:06:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-02T16:33:06.000Z", "max_issues_repo_path": "transformations/integrator/Integrator21GL.cc", 
"max_issues_repo_name": "gnafit/gna", "max_issues_repo_head_hexsha": "c1a58dac11783342c97a2da1b19c97b85bce0394", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "transformations/integrator/Integrator21GL.cc", "max_forks_repo_name": "gnafit/gna", "max_forks_repo_head_hexsha": "c1a58dac11783342c97a2da1b19c97b85bce0394", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4680851064, "max_line_length": 114, "alphanum_fraction": 0.7149496514, "num_tokens": 400, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8267118111485244, "lm_q2_score": 0.6039318337259584, "lm_q1q2_score": 0.4992775800698366}} {"text": "//==================================================================================================\n/*!\n @file\n\n @copyright 2016 NumScale SAS\n\n Distributed under the Boost Software License, Version 1.0.\n (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)\n*/\n//==================================================================================================\n#ifndef BOOST_SIMD_ARCH_COMMON_SIMD_FUNCTION_ASEC_HPP_INCLUDED\n#define BOOST_SIMD_ARCH_COMMON_SIMD_FUNCTION_ASEC_HPP_INCLUDED\n\n#include \n#include \n#include \n#include \n\nnamespace boost { namespace simd { namespace ext\n{\n namespace bd = boost::dispatch;\n namespace bs = boost::simd;\n BOOST_DISPATCH_OVERLOAD_IF ( asec_\n , (typename A0, typename X)\n , (detail::is_native)\n , bd::cpu_\n , bs::pack_< bd::double_, X>\n )\n {\n BOOST_FORCEINLINE A0 operator() (const A0& a0) const BOOST_NOEXCEPT\n {\n A0 tmp = (Pio_2()-acsc(a0)) + Constant();\n return if_zero_else(is_equal(a0, One()), tmp);\n }\n };\n\n BOOST_DISPATCH_OVERLOAD_IF( asec_\n , (typename A0, typename X)\n , 
(detail::is_native)\n , bd::cpu_\n , bs::pack_, X>\n )\n {\n BOOST_FORCEINLINE A0 operator()( const A0& a0) const BOOST_NOEXCEPT\n {\n return (bs::Pio_2()-bs::acsc(a0));\n }\n };\n\n} } }\n\n#endif\n", "meta": {"hexsha": "64e2f9d343ed78a2c4273ae00ae9093fd862f705", "size": 1727, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "third_party/boost/simd/arch/common/simd/function/asec.hpp", "max_stars_repo_name": "SylvainCorlay/pythran", "max_stars_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2018-02-25T22:23:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T15:13:12.000Z", "max_issues_repo_path": "third_party/boost/simd/arch/common/simd/function/asec.hpp", "max_issues_repo_name": "SylvainCorlay/pythran", "max_issues_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/boost/simd/arch/common/simd/function/asec.hpp", "max_forks_repo_name": "SylvainCorlay/pythran", "max_forks_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-12-12T12:36:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-10T14:27:07.000Z", "avg_line_length": 32.5849056604, "max_line_length": 100, "alphanum_fraction": 0.5066589461, "num_tokens": 400, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8267117940706734, "lm_q2_score": 0.6039318337259583, "lm_q1q2_score": 0.4992775697559786}} {"text": "//=======================================================================\r\n// Copyright 2007 Aaron Windsor\r\n//\r\n// Distributed under the Boost Software License, Version 1.0. 
(See\r\n// accompanying file LICENSE_1_0.txt or copy at\r\n// http://www.boost.org/LICENSE_1_0.txt)\r\n//=======================================================================\r\n#ifndef __IS_KURATOWSKI_SUBGRAPH_HPP__\r\n#define __IS_KURATOWSKI_SUBGRAPH_HPP__\r\n\r\n#include \r\n#include //for next/prior\r\n#include //for tie\r\n#include \r\n#include \r\n#include \r\n#include \r\n\r\n#include \r\n#include \r\n#include \r\n\r\n\r\n\r\nnamespace boost\r\n{\r\n \r\n namespace detail\r\n {\r\n\r\n template \r\n Graph make_K_5()\r\n {\r\n typename graph_traits::vertex_iterator vi, vi_end, inner_vi;\r\n Graph K_5(5);\r\n for(tie(vi,vi_end) = vertices(K_5); vi != vi_end; ++vi)\r\n for(inner_vi = next(vi); inner_vi != vi_end; ++inner_vi)\r\n add_edge(*vi, *inner_vi, K_5);\r\n return K_5;\r\n }\r\n\r\n\r\n template \r\n Graph make_K_3_3()\r\n {\r\n typename graph_traits::vertex_iterator \r\n vi, vi_end, bipartition_start, inner_vi;\r\n Graph K_3_3(6);\r\n bipartition_start = next(next(next(vertices(K_3_3).first)));\r\n for(tie(vi, vi_end) = vertices(K_3_3); vi != bipartition_start; ++vi)\r\n for(inner_vi= bipartition_start; inner_vi != vi_end; ++inner_vi)\r\n add_edge(*vi, *inner_vi, K_3_3);\r\n return K_3_3;\r\n }\r\n\r\n\r\n template \r\n void contract_edge(AdjacencyList& neighbors, Vertex u, Vertex v)\r\n {\r\n // Remove u from v's neighbor list\r\n neighbors[v].erase(std::remove(neighbors[v].begin(), \r\n neighbors[v].end(), u\r\n ), \r\n neighbors[v].end()\r\n );\r\n \r\n // Replace any references to u with references to v\r\n typedef typename AdjacencyList::value_type::iterator \r\n adjacency_iterator_t;\r\n \r\n adjacency_iterator_t u_neighbor_end = neighbors[u].end();\r\n for(adjacency_iterator_t u_neighbor_itr = neighbors[u].begin();\r\n u_neighbor_itr != u_neighbor_end; ++u_neighbor_itr\r\n )\r\n {\r\n Vertex u_neighbor(*u_neighbor_itr);\r\n std::replace(neighbors[u_neighbor].begin(), \r\n neighbors[u_neighbor].end(), u, v\r\n );\r\n }\r\n \r\n // Remove v from 
u's neighbor list\r\n neighbors[u].erase(std::remove(neighbors[u].begin(), \r\n neighbors[u].end(), v\r\n ), \r\n neighbors[u].end()\r\n );\r\n \r\n // Add everything in u's neighbor list to v's neighbor list\r\n std::copy(neighbors[u].begin(), \r\n neighbors[u].end(), \r\n std::back_inserter(neighbors[v])\r\n );\r\n \r\n // Clear u's neighbor list\r\n neighbors[u].clear();\r\n\r\n }\r\n\r\n enum target_graph_t { tg_k_3_3, tg_k_5};\r\n\r\n } // namespace detail\r\n\r\n\r\n\r\n\r\n template \r\n bool is_kuratowski_subgraph(const Graph& g,\r\n ForwardIterator begin, \r\n ForwardIterator end, \r\n VertexIndexMap vm\r\n )\r\n {\r\n\r\n typedef typename graph_traits::vertex_descriptor vertex_t;\r\n typedef typename graph_traits::vertex_iterator vertex_iterator_t;\r\n typedef typename graph_traits::edge_descriptor edge_t;\r\n typedef typename graph_traits::edges_size_type e_size_t;\r\n typedef typename graph_traits::vertices_size_type v_size_t;\r\n typedef typename std::vector v_list_t;\r\n typedef typename v_list_t::iterator v_list_iterator_t;\r\n typedef iterator_property_map\r\n ::iterator, VertexIndexMap> \r\n vertex_to_v_list_map_t;\r\n\r\n typedef adjacency_list small_graph_t;\r\n\r\n detail::target_graph_t target_graph = detail::tg_k_3_3; //unless we decide otherwise later\r\n\r\n static small_graph_t K_5(detail::make_K_5());\r\n\r\n static small_graph_t K_3_3(detail::make_K_3_3());\r\n\r\n v_size_t n_vertices(num_vertices(g));\r\n v_size_t max_num_edges(3*n_vertices - 5);\r\n\r\n std::vector neighbors_vector(n_vertices);\r\n vertex_to_v_list_map_t neighbors(neighbors_vector.begin(), vm);\r\n\r\n e_size_t count = 0;\r\n for(ForwardIterator itr = begin; itr != end; ++itr)\r\n {\r\n\r\n if (count++ > max_num_edges)\r\n return false;\r\n\r\n edge_t e(*itr);\r\n vertex_t u(source(e,g));\r\n vertex_t v(target(e,g));\r\n\r\n neighbors[u].push_back(v);\r\n neighbors[v].push_back(u);\r\n\r\n }\r\n\r\n\r\n for(v_size_t max_size = 2; max_size < 5; ++max_size)\r\n {\r\n\r\n 
vertex_iterator_t vi, vi_end;\r\n for(tie(vi,vi_end) = vertices(g); vi != vi_end; ++vi)\r\n {\r\n vertex_t v(*vi);\r\n\r\n //a hack to make sure we don't contract the middle edge of a path\r\n //of four degree-3 vertices\r\n if (max_size == 4 && neighbors[v].size() == 3)\r\n {\r\n if (neighbors[neighbors[v][0]].size() +\r\n neighbors[neighbors[v][1]].size() +\r\n neighbors[neighbors[v][2]].size()\r\n < 11 // so, it has two degree-3 neighbors\r\n )\r\n continue;\r\n }\r\n\r\n while (neighbors[v].size() > 0 && neighbors[v].size() < max_size)\r\n {\r\n // Find one of v's neighbors u such that that v and u\r\n // have no neighbors in common. We'll look for such a \r\n // neighbor with a naive cubic-time algorithm since the \r\n // max size of any of the neighbor sets we'll consider \r\n // merging is 3\r\n \r\n bool neighbor_sets_intersect = false;\r\n \r\n vertex_t min_u = graph_traits::null_vertex();\r\n vertex_t u;\r\n v_list_iterator_t v_neighbor_end = neighbors[v].end();\r\n for(v_list_iterator_t v_neighbor_itr = neighbors[v].begin();\r\n v_neighbor_itr != v_neighbor_end; \r\n ++v_neighbor_itr\r\n )\r\n {\r\n neighbor_sets_intersect = false;\r\n u = *v_neighbor_itr;\r\n v_list_iterator_t u_neighbor_end = neighbors[u].end();\r\n for(v_list_iterator_t u_neighbor_itr = \r\n neighbors[u].begin();\r\n u_neighbor_itr != u_neighbor_end && \r\n !neighbor_sets_intersect; \r\n ++u_neighbor_itr\r\n )\r\n {\r\n for(v_list_iterator_t inner_v_neighbor_itr = \r\n neighbors[v].begin();\r\n inner_v_neighbor_itr != v_neighbor_end; \r\n ++inner_v_neighbor_itr\r\n )\r\n {\r\n if (*u_neighbor_itr == *inner_v_neighbor_itr)\r\n {\r\n neighbor_sets_intersect = true;\r\n break;\r\n }\r\n }\r\n \r\n }\r\n if (!neighbor_sets_intersect &&\r\n (min_u == graph_traits::null_vertex() || \r\n neighbors[u].size() < neighbors[min_u].size())\r\n )\r\n {\r\n min_u = u;\r\n }\r\n \r\n }\r\n\r\n if (min_u == graph_traits::null_vertex())\r\n // Exited the loop without finding an appropriate neighbor 
of\r\n // v, so v must be a lost cause. Move on to other vertices.\r\n break;\r\n else\r\n u = min_u;\r\n\r\n detail::contract_edge(neighbors, u, v);\r\n\r\n }//end iteration over v's neighbors\r\n\r\n }//end iteration through vertices v\r\n\r\n if (max_size == 3)\r\n {\r\n // check to see whether we should go on to find a K_5\r\n for(tie(vi,vi_end) = vertices(g); vi != vi_end; ++vi)\r\n if (neighbors[*vi].size() == 4)\r\n {\r\n target_graph = detail::tg_k_5;\r\n break;\r\n }\r\n\r\n if (target_graph == detail::tg_k_3_3)\r\n break;\r\n }\r\n \r\n }//end iteration through max degree 2,3, and 4\r\n\r\n \r\n //Now, there should only be 5 or 6 vertices with any neighbors. Find them.\r\n \r\n v_list_t main_vertices;\r\n vertex_iterator_t vi, vi_end;\r\n \r\n for(tie(vi,vi_end) = vertices(g); vi != vi_end; ++vi)\r\n {\r\n if (!neighbors[*vi].empty())\r\n main_vertices.push_back(*vi);\r\n }\r\n \r\n // create a graph isomorphic to the contracted graph to test \r\n // against K_5 and K_3_3\r\n small_graph_t contracted_graph(main_vertices.size());\r\n std::map::vertex_descriptor> \r\n contracted_vertex_map;\r\n \r\n typename v_list_t::iterator itr, itr_end;\r\n itr_end = main_vertices.end();\r\n typename graph_traits::vertex_iterator \r\n si = vertices(contracted_graph).first;\r\n \r\n for(itr = main_vertices.begin(); itr != itr_end; ++itr, ++si)\r\n {\r\n contracted_vertex_map[*itr] = *si;\r\n }\r\n\r\n typename v_list_t::iterator jtr, jtr_end;\r\n for(itr = main_vertices.begin(); itr != itr_end; ++itr)\r\n {\r\n jtr_end = neighbors[*itr].end();\r\n for(jtr = neighbors[*itr].begin(); jtr != jtr_end; ++jtr)\r\n {\r\n if (get(vm,*itr) < get(vm,*jtr))\r\n {\r\n add_edge(contracted_vertex_map[*itr],\r\n contracted_vertex_map[*jtr],\r\n contracted_graph\r\n );\r\n }\r\n }\r\n }\r\n \r\n if (target_graph == detail::tg_k_5)\r\n {\r\n return isomorphism(K_5,contracted_graph);\r\n }\r\n else //target_graph == tg_k_3_3\r\n {\r\n return isomorphism(K_3_3,contracted_graph);\r\n }\r\n 
\r\n \r\n }\r\n\r\n\r\n\r\n\r\n\r\n template \r\n bool is_kuratowski_subgraph(const Graph& g, \r\n ForwardIterator begin, \r\n ForwardIterator end\r\n )\r\n {\r\n return is_kuratowski_subgraph(g, begin, end, get(vertex_index,g));\r\n }\r\n\r\n\r\n\r\n \r\n}\r\n\r\n#endif //__IS_KURATOWSKI_SUBGRAPH_HPP__\r\n", "meta": {"hexsha": "a9509649f0c9c050122e1b3d8560d30169c21443", "size": 11281, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "trunk/win/Source/Includes/Boost/graph/is_kuratowski_subgraph.hpp", "max_stars_repo_name": "dyzmapl/BumpTop", "max_stars_repo_head_hexsha": "1329ea41411c7368516b942d19add694af3d602f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 460.0, "max_stars_repo_stars_event_min_datetime": "2016-01-13T12:49:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-20T04:10:40.000Z", "max_issues_repo_path": "trunk/win/Source/Includes/Boost/graph/is_kuratowski_subgraph.hpp", "max_issues_repo_name": "dyzmapl/BumpTop", "max_issues_repo_head_hexsha": "1329ea41411c7368516b942d19add694af3d602f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 24.0, "max_issues_repo_issues_event_min_datetime": "2016-11-07T04:59:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T06:34:12.000Z", "max_forks_repo_path": "trunk/win/Source/Includes/Boost/graph/is_kuratowski_subgraph.hpp", "max_forks_repo_name": "dyzmapl/BumpTop", "max_forks_repo_head_hexsha": "1329ea41411c7368516b942d19add694af3d602f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 148.0, "max_forks_repo_forks_event_min_datetime": "2016-01-17T03:16:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T12:20:36.000Z", "avg_line_length": 33.8768768769, "max_line_length": 95, "alphanum_fraction": 0.5099725202, "num_tokens": 2352, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7662936430859598, "lm_q2_score": 0.6513548782017745, "lm_q1q2_score": 0.4991291025590494}} {"text": "//\n// Created by Alex Beccaro on 21/03/18.\n//\n\n#define BOOST_TEST_DYN_LINK\n#include \n#include \"../../src/problems/101-150/101/problem101.hpp\"\n\nBOOST_AUTO_TEST_SUITE( Problem101 )\n\n BOOST_AUTO_TEST_CASE( Example1 ) {\n auto res = problems::problem101::solve([](int32_t x) { return x * x * x; });\n BOOST_CHECK_EQUAL(res, 74);\n }\n\n BOOST_AUTO_TEST_CASE( Solution ) {\n auto res = problems::problem101::solve();\n BOOST_CHECK_EQUAL(res, 37076114526);\n }\n\nBOOST_AUTO_TEST_SUITE_END()", "meta": {"hexsha": "41e764b9f8ac039ce028ccbbb686d54f20955729", "size": 541, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/101-150/test_problem101.cpp", "max_stars_repo_name": "abeccaro/project-euler", "max_stars_repo_head_hexsha": "c3b124bb973dc3a1cf29e8c96c3e70c8816d5fa3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-12-25T10:17:15.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-25T10:17:15.000Z", "max_issues_repo_path": "tests/101-150/test_problem101.cpp", "max_issues_repo_name": "abeccaro/project-euler", "max_issues_repo_head_hexsha": "c3b124bb973dc3a1cf29e8c96c3e70c8816d5fa3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/101-150/test_problem101.cpp", "max_forks_repo_name": "abeccaro/project-euler", "max_forks_repo_head_hexsha": "c3b124bb973dc3a1cf29e8c96c3e70c8816d5fa3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7619047619, "max_line_length": 84, "alphanum_fraction": 0.6709796673, "num_tokens": 141, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7662936430859598, "lm_q2_score": 0.6513548714339145, "lm_q1q2_score": 0.4991290973728813}} {"text": "//\n// Copyright (c) 2015-2020 CNRS INRIA\n// Copyright (c) 2015 Wandercraft, 86 rue de Paris 91400 Orsay, France.\n//\n\n#include \"pinocchio/math/fwd.hpp\"\n#include \"pinocchio/multibody/joint/joints.hpp\"\n#include \"pinocchio/algorithm/rnea.hpp\"\n#include \"pinocchio/algorithm/aba.hpp\"\n#include \"pinocchio/algorithm/crba.hpp\"\n#include \"pinocchio/algorithm/jacobian.hpp\"\n#include \"pinocchio/algorithm/compute-all-terms.hpp\"\n\n#include \n#include \n\nusing namespace pinocchio;\n\ntemplate\nvoid addJointAndBody(Model & model,\n const JointModelBase & jmodel,\n const Model::JointIndex parent_id,\n const SE3 & joint_placement,\n const std::string & joint_name,\n const Inertia & Y)\n{\n Model::JointIndex idx;\n \n idx = model.addJoint(parent_id,jmodel,joint_placement,joint_name);\n model.appendBodyToJoint(idx,Y);\n}\n\nBOOST_AUTO_TEST_SUITE(JointSpherical)\n \nBOOST_AUTO_TEST_CASE(spatial)\n{\n SE3 M(SE3::Random());\n Motion v(Motion::Random());\n \n MotionSpherical mp(MotionSpherical::Vector3(1.,2.,3.));\n Motion mp_dense(mp);\n \n BOOST_CHECK(M.act(mp).isApprox(M.act(mp_dense)));\n BOOST_CHECK(M.actInv(mp).isApprox(M.actInv(mp_dense)));\n \n BOOST_CHECK(v.cross(mp).isApprox(v.cross(mp_dense)));\n}\n\nBOOST_AUTO_TEST_CASE(vsFreeFlyer)\n{\n using namespace pinocchio;\n typedef SE3::Vector3 Vector3;\n typedef Eigen::Matrix Vector6;\n typedef Eigen::Matrix VectorFF;\n typedef SE3::Matrix3 Matrix3;\n\n Model modelSpherical, modelFreeflyer;\n\n Inertia inertia(1., Vector3(0.5, 0., 0.0), Matrix3::Identity());\n SE3 pos(1); pos.translation() = SE3::LinearType(1.,0.,0.);\n\n addJointAndBody(modelSpherical,JointModelSpherical(),0,pos,\"spherical\",inertia);\n addJointAndBody(modelFreeflyer,JointModelFreeFlyer(),0,pos,\"free-flyer\",inertia);\n \n Data dataSpherical(modelSpherical);\n Data dataFreeFlyer(modelFreeflyer);\n\n Eigen::VectorXd q = 
Eigen::VectorXd::Ones(modelSpherical.nq);q.normalize();\n VectorFF qff; qff << 0, 0, 0, q[0], q[1], q[2], q[3];\n Eigen::VectorXd v = Eigen::VectorXd::Ones(modelSpherical.nv);\n Vector6 vff; vff << 0, 0, 0, 1, 1, 1;\n Eigen::VectorXd tauSpherical = Eigen::VectorXd::Ones(modelSpherical.nv);\n Eigen::VectorXd tauff; tauff.resize(7); tauff << 0,0,0,1,1,1,1;\n Eigen::VectorXd aSpherical = Eigen::VectorXd::Ones(modelSpherical.nv);\n Eigen::VectorXd aff(vff);\n \n forwardKinematics(modelSpherical, dataSpherical, q, v);\n forwardKinematics(modelFreeflyer, dataFreeFlyer, qff, vff);\n\n computeAllTerms(modelSpherical, dataSpherical, q, v);\n computeAllTerms(modelFreeflyer, dataFreeFlyer, qff, vff);\n\n BOOST_CHECK(dataFreeFlyer.oMi[1].isApprox(dataSpherical.oMi[1]));\n BOOST_CHECK(dataFreeFlyer.liMi[1].isApprox(dataSpherical.liMi[1]));\n BOOST_CHECK(dataFreeFlyer.Ycrb[1].matrix().isApprox(dataSpherical.Ycrb[1].matrix()));\n BOOST_CHECK(dataFreeFlyer.f[1].toVector().isApprox(dataSpherical.f[1].toVector()));\n \n Eigen::VectorXd nle_expected_ff(3); nle_expected_ff << dataFreeFlyer.nle[3],\n dataFreeFlyer.nle[4],\n dataFreeFlyer.nle[5]\n ;\n BOOST_CHECK(nle_expected_ff.isApprox(dataSpherical.nle));\n BOOST_CHECK(dataFreeFlyer.com[0].isApprox(dataSpherical.com[0]));\n\n // InverseDynamics == rnea\n tauSpherical = rnea(modelSpherical, dataSpherical, q, v, aSpherical);\n tauff = rnea(modelFreeflyer, dataFreeFlyer, qff, vff, aff);\n\n Vector3 tau_expected; tau_expected << tauff(3), tauff(4), tauff(5);\n BOOST_CHECK(tauSpherical.isApprox(tau_expected));\n\n // ForwardDynamics == aba\n Eigen::VectorXd aAbaSpherical = aba(modelSpherical,dataSpherical, q, v, tauSpherical);\n Eigen::VectorXd aAbaFreeFlyer = aba(modelFreeflyer,dataFreeFlyer, qff, vff, tauff);\n Vector3 a_expected; a_expected << aAbaFreeFlyer[3],\n aAbaFreeFlyer[4],\n aAbaFreeFlyer[5]\n ;\n BOOST_CHECK(aAbaSpherical.isApprox(a_expected));\n\n // crba\n crba(modelSpherical, dataSpherical,q);\n crba(modelFreeflyer, 
dataFreeFlyer, qff);\n\n Eigen::Matrix M_expected(dataFreeFlyer.M.bottomRightCorner<3,3>());\n\n BOOST_CHECK(dataSpherical.M.isApprox(M_expected));\n \n // Jacobian\n Eigen::Matrix jacobian_planar;jacobian_planar.resize(6,3); jacobian_planar.setZero();\n Eigen::Matrix jacobian_ff;jacobian_ff.resize(6,6);jacobian_ff.setZero();\n computeJointJacobians(modelSpherical, dataSpherical, q);\n computeJointJacobians(modelFreeflyer, dataFreeFlyer, qff);\n getJointJacobian(modelSpherical, dataSpherical, 1, LOCAL, jacobian_planar);\n getJointJacobian(modelFreeflyer, dataFreeFlyer, 1, LOCAL, jacobian_ff);\n\n\n Eigen::Matrix jacobian_expected; jacobian_expected << jacobian_ff.col(3),\n jacobian_ff.col(4),\n jacobian_ff.col(5)\n ;\n\n BOOST_CHECK(jacobian_planar.isApprox(jacobian_expected));\n\n}\nBOOST_AUTO_TEST_SUITE_END()\n\nBOOST_AUTO_TEST_SUITE(JointSphericalZYX)\n \nBOOST_AUTO_TEST_CASE(spatial)\n{\n SE3 M(SE3::Random());\n Motion v(Motion::Random());\n \n MotionSpherical mp(MotionSpherical::Vector3(1.,2.,3.));\n Motion mp_dense(mp);\n \n BOOST_CHECK(M.act(mp).isApprox(M.act(mp_dense)));\n BOOST_CHECK(M.actInv(mp).isApprox(M.actInv(mp_dense)));\n \n BOOST_CHECK(v.cross(mp).isApprox(v.cross(mp_dense)));\n}\n\nBOOST_AUTO_TEST_CASE(vsFreeFlyer)\n{\n // WARNIG : Dynamic algorithm's results cannot be compared to FreeFlyer's ones because\n // of the representation of the rotation and the ConstraintSubspace difference.\n using namespace pinocchio;\n typedef SE3::Vector3 Vector3;\n typedef Eigen::Matrix Vector6;\n typedef Eigen::Matrix VectorFF;\n typedef SE3::Matrix3 Matrix3;\n\n Model modelSphericalZYX, modelFreeflyer;\n\n Inertia inertia(1., Vector3(0.5, 0., 0.0), Matrix3::Identity());\n SE3 pos(1); pos.translation() = SE3::LinearType(1.,0.,0.);\n\n addJointAndBody(modelSphericalZYX,JointModelSphericalZYX(),0,pos,\"spherical-zyx\",inertia);\n addJointAndBody(modelFreeflyer,JointModelFreeFlyer(),0,pos,\"free-flyer\",inertia);\n\n Data dataSphericalZYX(modelSphericalZYX);\n Data 
dataFreeFlyer(modelFreeflyer);\n\n Eigen::AngleAxisd rollAngle(1, Eigen::Vector3d::UnitZ());\n Eigen::AngleAxisd yawAngle(1, Eigen::Vector3d::UnitY());\n Eigen::AngleAxisd pitchAngle(1, Eigen::Vector3d::UnitX());\n Eigen::Quaterniond q_sph = rollAngle * yawAngle * pitchAngle;\n \n Eigen::VectorXd q = Eigen::VectorXd::Ones(modelSphericalZYX.nq);\n VectorFF qff; qff << 0, 0, 0, q_sph.x(), q_sph.y(), q_sph.z(), q_sph.w();\n Eigen::VectorXd v = Eigen::VectorXd::Ones(modelSphericalZYX.nv);\n Vector6 vff; vff << 0, 0, 0, 1, 1, 1;\n Eigen::VectorXd tauSpherical = Eigen::VectorXd::Ones(modelSphericalZYX.nv);\n Eigen::VectorXd tauff; tauff.resize(6); tauff << 0,0,0,1,1,1;\n Eigen::VectorXd aSpherical = Eigen::VectorXd::Ones(modelSphericalZYX.nv);\n Eigen::VectorXd aff(vff);\n \n forwardKinematics(modelSphericalZYX, dataSphericalZYX, q, v);\n forwardKinematics(modelFreeflyer, dataFreeFlyer, qff, vff);\n\n computeAllTerms(modelSphericalZYX, dataSphericalZYX, q, v);\n computeAllTerms(modelFreeflyer, dataFreeFlyer, qff, vff);\n\n BOOST_CHECK(dataFreeFlyer.oMi[1].isApprox(dataSphericalZYX.oMi[1]));\n BOOST_CHECK(dataFreeFlyer.liMi[1].isApprox(dataSphericalZYX.liMi[1]));\n BOOST_CHECK(dataFreeFlyer.Ycrb[1].matrix().isApprox(dataSphericalZYX.Ycrb[1].matrix()));\n\n BOOST_CHECK(dataFreeFlyer.com[0].isApprox(dataSphericalZYX.com[0]));\n}\n\nBOOST_AUTO_TEST_CASE(test_rnea)\n{\n using namespace pinocchio;\n typedef SE3::Vector3 Vector3;\n typedef SE3::Matrix3 Matrix3;\n\n Model model;\n Inertia inertia(1., Vector3(0.5, 0., 0.0), Matrix3::Identity());\n\n addJointAndBody(model,JointModelSphericalZYX(),model.getJointId(\"universe\"),SE3::Identity(),\"root\",inertia);\n\n Data data(model);\n\n Eigen::VectorXd q = Eigen::VectorXd::Zero(model.nq);\n Eigen::VectorXd v = Eigen::VectorXd::Zero(model.nv);\n Eigen::VectorXd a = Eigen::VectorXd::Zero(model.nv);\n\n rnea(model, data, q, v, a);\n Vector3 tau_expected(0., -4.905, 0.);\n\n BOOST_CHECK(tau_expected.isApprox(data.tau, 1e-14));\n\n q = 
Eigen::VectorXd::Ones(model.nq);\n v = Eigen::VectorXd::Ones(model.nv);\n a = Eigen::VectorXd::Ones(model.nv);\n\n rnea(model, data, q, v, a);\n tau_expected << -0.53611600195085, -0.74621832606188, -0.38177329067604;\n\n BOOST_CHECK(tau_expected.isApprox(data.tau, 1e-12));\n\n q << 3, 2, 1;\n v = Eigen::VectorXd::Ones(model.nv);\n a = Eigen::VectorXd::Ones(model.nv);\n\n rnea(model, data, q, v, a);\n tau_expected << 0.73934458094049, 2.7804530848031, 0.50684940972146;\n\n BOOST_CHECK(tau_expected.isApprox(data.tau, 1e-12));\n}\n\nBOOST_AUTO_TEST_CASE(test_crba)\n{\n using namespace pinocchio;\n using namespace std;\n typedef SE3::Vector3 Vector3;\n typedef SE3::Matrix3 Matrix3;\n\n Model model;\n Inertia inertia(1., Vector3(0.5, 0., 0.0), Matrix3::Identity());\n\n addJointAndBody(model,JointModelSphericalZYX(),model.getJointId(\"universe\"),SE3::Identity(),\"root\",inertia);\n\n Data data(model);\n\n Eigen::VectorXd q(Eigen::VectorXd::Zero(model.nq));\n Eigen::MatrixXd M_expected(model.nv,model.nv);\n\n crba(model, data, q);\n M_expected <<\n 1.25, 0, 0,\n 0, 1.25, 0,\n 0, 0, 1;\n\n BOOST_CHECK(M_expected.isApprox(data.M, 1e-14));\n\n q = Eigen::VectorXd::Ones(model.nq);\n\n crba(model, data, q);\n M_expected <<\n 1.0729816454316, -5.5511151231258e-17, -0.8414709848079,\n -5.5511151231258e-17, 1.25, 0,\n -0.8414709848079, 0, 1;\n\n BOOST_CHECK(M_expected.isApprox(data.M, 1e-12));\n\n q << 3, 2, 1;\n\n crba(model, data, q);\n M_expected <<\n 1.043294547392, 2.7755575615629e-17, -0.90929742682568,\n 0, 1.25, 0,\n -0.90929742682568, 0, 1;\n\n BOOST_CHECK(M_expected.isApprox(data.M, 1e-10));\n}\n\nBOOST_AUTO_TEST_SUITE_END()\n", "meta": {"hexsha": "4a5068221e781e3f1b85a2bb6ceb301c6cc3e3cc", "size": 10311, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "unittest/joint-spherical.cpp", "max_stars_repo_name": "thanhndv212/pinocchio", "max_stars_repo_head_hexsha": "3b4d272bf4e8a231954b71201ee7e0963c944aef", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], 
"max_stars_count": 716.0, "max_stars_repo_stars_event_min_datetime": "2015-03-30T16:26:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:26:58.000Z", "max_issues_repo_path": "unittest/joint-spherical.cpp", "max_issues_repo_name": "thanhndv212/pinocchio", "max_issues_repo_head_hexsha": "3b4d272bf4e8a231954b71201ee7e0963c944aef", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": 1130.0, "max_issues_repo_issues_event_min_datetime": "2015-02-21T17:30:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T09:06:22.000Z", "max_forks_repo_path": "unittest/joint-spherical.cpp", "max_forks_repo_name": "thanhndv212/pinocchio", "max_forks_repo_head_hexsha": "3b4d272bf4e8a231954b71201ee7e0963c944aef", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": 239.0, "max_forks_repo_forks_event_min_datetime": "2015-02-05T14:15:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T23:51:47.000Z", "avg_line_length": 35.5551724138, "max_line_length": 114, "alphanum_fraction": 0.6780137717, "num_tokens": 3121, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7662936430859597, "lm_q2_score": 0.6513548714339145, "lm_q1q2_score": 0.49912909737288125}} {"text": "#include\r\n#include\r\n#include\r\n#include\r\n#include\r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include\r\n#include \r\nusing namespace std;\r\n// Function to print the vector\r\nvoid PrintData(vector& data);\r\n// Function to print the matrix\r\nvoid PrintData(vector< vector >& data);\r\n\r\n// Function to do Cholesky decomposition\r\nvector< vector > Cholesky(vector< vector >& data);\r\n\r\n//Function to write matrix into csv file\r\nvoid WriteToCsv(string CsvFile, vector< vector >& data);\r\n\r\n//Function to write vector into csv file\r\nvoid WriteToCsv(string CsvFile, vector& data);\r\n\r\n// Function to calculate standard deviation\r\ndouble CaclVariance(vector& data);", "meta": {"hexsha": "00776289061b3d30b7f7edee1fd917f7ed3abc5a", "size": 910, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "Extra.hpp", "max_stars_repo_name": "icezerowjj/Monte-Carlo-Simulation", "max_stars_repo_head_hexsha": "a9cfb6cc0fcdd274138590f2845b758d8bc3c9e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2018-01-27T15:17:59.000Z", "max_stars_repo_stars_event_max_datetime": "2018-01-27T15:17:59.000Z", "max_issues_repo_path": "Extra.hpp", "max_issues_repo_name": "icezerowjj/Monte-Carlo-Option-Pricing", "max_issues_repo_head_hexsha": "a9cfb6cc0fcdd274138590f2845b758d8bc3c9e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Extra.hpp", "max_forks_repo_name": "icezerowjj/Monte-Carlo-Option-Pricing", "max_forks_repo_head_hexsha": "a9cfb6cc0fcdd274138590f2845b758d8bc3c9e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-04-11T06:14:25.000Z", 
"max_forks_repo_forks_event_max_datetime": "2021-04-19T04:20:04.000Z", "avg_line_length": 31.3793103448, "max_line_length": 67, "alphanum_fraction": 0.7483516484, "num_tokens": 191, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.6513548646660543, "lm_q2_score": 0.7662936430859597, "lm_q1q2_score": 0.499129092186713}} {"text": "#include \n#include \n#include \n\n#include \"check_adjoint.h\"\n#include \"test_utils.h\"\n#include \"renderer_blending.cuh\"\n\n\ntemplate\nvoid testAdjointBlending()\n{\n\ttypedef empty TmpStorage_t;\n\ttypedef VectorXr Vector_t;\n\n\tauto forward = [](const Vector_t& x, TmpStorage_t* tmp) -> Vector_t\n\t{\n\t\tconst real4 acc = fromEigen4(x.segment(0, 4));\n\t\tconst real4 current = fromEigen4(x.segment(4, 4));\n\t\tconst real_t stepsize = x[8];\n\n\t\tconst real4 output = kernel::Blending::blend(\n\t\t\tacc, current, stepsize);\n\n\t\treturn toEigen(output);\n\t};\n\tauto adjoint = [](const Vector_t& x, const Vector_t& e, const Vector_t& g,\n\t\tVector_t& z, const TmpStorage_t& tmp)\n\t{\n\t\tconst real4 acc = fromEigen4(x.segment(0, 4));\n\t\tconst real4 current = fromEigen4(x.segment(4, 4));\n\t\tconst real_t stepsize = x[8];\n\n\t\tconst real4 output = fromEigen4(e);\n\t\tconst real4 adj_output = fromEigen4(g);\n\n\t\treal4 adj_acc, adj_current;\n\t\treal_t adj_stepsize;\n\n\t\treal4 acc_in = kernel::Blending::adjoint(\n\t\t\toutput, current, stepsize, adj_output,\n\t\t\tadj_acc, adj_current, adj_stepsize);\n\n\t\tINFO(\"input-acc: \" << acc);\n\t\tINFO(\"reconstructed-acc: \" << acc_in);\n\t\tREQUIRE(acc.x == Approx(acc_in.x));\n\t\tREQUIRE(acc.y == Approx(acc_in.y));\n\t\tREQUIRE(acc.z == Approx(acc_in.z));\n\t\tREQUIRE(acc.w == Approx(acc_in.w));\n\n\t\tz.segment<4>(0) = toEigen(adj_acc);\n\t\tz.segment<4>(4) = toEigen(adj_current);\n\t\tz[8] = adj_stepsize;\n\t};\n\n\tstd::default_random_engine rnd(42);\n\tstd::uniform_real_distribution distr(0.01, 0.99);\n\tint N = 20;\n\tfor (int i = 0; i < N; 
++i)\n\t{\n\t\tINFO(\"N=\" << i);\n\t\tVector_t x(9);\n\t\tfor (int j = 0; j < 9; ++j) x[j] = distr(rnd);\n\n\t\tcheckAdjoint(x, forward, adjoint,\n\t\t\t1e-5, 1e-5, 1e-6);\n\t}\n}\n\nTEST_CASE(\"Adjoint-Blending-BeerLambert\", \"[adjoint]\")\n{\n\ttestAdjointBlending();\n}\n\nTEST_CASE(\"Adjoint-Blending-Alpha\", \"[adjoint]\")\n{\n\ttestAdjointBlending();\n}\n\n", "meta": {"hexsha": "1aa1aad910a6beaaffcbd4193f42ab9be8327aeb", "size": 1980, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "unittests/testAdjointBlending.cpp", "max_stars_repo_name": "shamanDevel/DiffDVR", "max_stars_repo_head_hexsha": "99fbe9f114d0097daf402bde2ae35f18dade335d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2021-08-02T04:51:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T18:02:27.000Z", "max_issues_repo_path": "unittests/testAdjointBlending.cpp", "max_issues_repo_name": "shamanDevel/DiffDVR", "max_issues_repo_head_hexsha": "99fbe9f114d0097daf402bde2ae35f18dade335d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-11-04T14:23:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T10:30:13.000Z", "max_forks_repo_path": "unittests/testAdjointBlending.cpp", "max_forks_repo_name": "shamanDevel/DiffDVR", "max_forks_repo_head_hexsha": "99fbe9f114d0097daf402bde2ae35f18dade335d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2021-07-16T10:23:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-04T02:51:43.000Z", "avg_line_length": 24.75, "max_line_length": 75, "alphanum_fraction": 0.6803030303, "num_tokens": 632, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7662936430859597, "lm_q2_score": 0.6513548646660542, "lm_q1q2_score": 0.4991290921867129}} {"text": "//============================================================================\n// Daniel J. Greenhoe\n// normed linear space R^2\n//=============================================================================\n//=====================================\n// headers\n//=====================================\n#include \n#include \n#include \n#include \n#include // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89325\n#include \"r1.h\"\n#include \"r4.h\"\ntypedef Eigen::Matrix< double, 4, 1 > Vector4d;\n\n//=====================================\n// oquad\n//=====================================\n//-----------------------------------------------------------------------------\n// oquad constructors\n//-----------------------------------------------------------------------------\noquad::oquad(void)\n{\n xx.at(0) = 0.0;\n xx.at(1) = 0.0;\n xx.at(2) = 0.0;\n xx.at(3) = 0.0;\n}\n\noquad::oquad(double u0, double u1, double u2, double u3)\n{\n xx.at(0) = u0;\n xx.at(1) = u1;\n xx.at(2) = u2;\n xx.at(3) = u3;\n}\n\noquad::oquad(double u)\n{\n xx.at(0) = u;\n xx.at(1) = u;\n xx.at(2) = u;\n xx.at(3) = u;\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief oquad put member functions\n//-----------------------------------------------------------------------------\nvoid oquad::put(double u)\n{\n xx.at(0) = u;\n xx.at(1) = u;\n xx.at(2) = u;\n xx.at(3) = u;\n}\n\nvoid oquad::put(oquad u)\n{\n xx.at(0) = u.get(0);\n xx.at(1) = u.get(1);\n xx.at(2) = u.get(2);\n xx.at(3) = u.get(3);\n}\n\n//-----------------------------------------------------------------------------\n//! 
\\brief Write values to oquad\n//-----------------------------------------------------------------------------\nvoid oquad::put(double u0, double u1, double u2, double u3)\n{\n xx.at(0) = u0;\n xx.at(1) = u1;\n xx.at(2) = u2;\n xx.at(3) = u3;\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief Write values to oquad\n//-----------------------------------------------------------------------------\nvoid oquad::put(int n,double u)\n{\n xx.at(n) = u;\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief return the 4-tuple value\n//-----------------------------------------------------------------------------\noquad oquad::get(void)\n{\n oquad u;\n u.put( 0, get1() );\n u.put( 1, get2() );\n u.put( 2, get3() );\n u.put( 3, get4() );\n return u;\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief return the minimum element of the 4 tupple\n//-----------------------------------------------------------------------------\ndouble oquad::min(void) const\n{\n const Eigen::Map< const Vector4d > a( getdata() );\n const double minVal = a.minCoeff();\n return minVal;\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief return the maximum element of the 4 tupple\n//-----------------------------------------------------------------------------\ndouble oquad::max(void) const\n{\n const Eigen::Map< const Vector4d > a( getdata() );\n const double maxVal = a.maxCoeff();\n return maxVal;\n}\n\n//-----------------------------------------------------------------------------\n//! 
\\brief print the tuple\n//-----------------------------------------------------------------------------\nvoid oquad::list(const char *str1, const char *str2) const\n{\n if(strlen(str1)!=0)printf(\"%s\",str1);\n putchar('(');\n printf(\"%9.6lf,\", get1() );\n printf(\"%9.6lf,\", get2() );\n printf(\"%9.6lf,\", get3() );\n printf(\"%9.6lf)\", get4() );\n if(strlen(str2)!=0)printf(\"%s\",str2);\n}\n\n//=====================================\n// vectR4 functions\n//=====================================\n//-----------------------------------------------------------------------------\n//! \\brief return the 4-tuple value\n//-----------------------------------------------------------------------------\nconst vectR4 vectR4::get(void)\n{\n vectR4 u;\n u.put( 0, get1() );\n u.put( 1, get2() );\n u.put( 2, get3() );\n u.put( 3, get4() );\n return u;\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief magnitude\n//-----------------------------------------------------------------------------\ndouble vectR4::mag(void) const\n{\n const Eigen::Map< const Vector4d > a( getdata() );\n return a.norm();\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief Multiply the vector by a scalar a\n//-----------------------------------------------------------------------------\nvectR4 vectR4::mpy(const double a)\n{\n vectR4 w;\n const Eigen::Map< const Vector4d > vv( getdata() );\n Eigen::Map< Vector4d > ww( w.getdataa() );\n ww = a * vv;\n return w;\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief operator: +=\n//-----------------------------------------------------------------------------\nvoid vectR4::operator+=(vectR4 q)\n{\n Eigen::Map< Vector4d > pp( getdataa() );\n const Eigen::Map< const Vector4d > qq( q.getdata() );\n pp = pp + qq;\n}\n\n//-----------------------------------------------------------------------------\n//! 
\\brief operator: -=\n//-----------------------------------------------------------------------------\nvoid vectR4::operator-=(vectR4 q)\n{\n Eigen::Map< Vector4d > pp( getdataa() );\n const Eigen::Map< const Vector4d > qq( q.getdata() );\n pp = pp - qq;\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief operator: -=\n//-----------------------------------------------------------------------------\nvoid vectR4::operator*=(double a)\n{\n vectR4 p=get();\n p = a*p;\n put(p);\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief operator: a*y\n//-----------------------------------------------------------------------------\nvectR4 operator*(const double a, const vectR4 x)\n{\n vectR4 y;\n const Eigen::Map< const Vector4d > xx( x.getdata() );\n Eigen::Map< Vector4d > yy( y.getdataa() );\n yy = a * xx;\n return y;\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief operator: dot product of p and q\n//-----------------------------------------------------------------------------\ndouble operator^(vectR4 p,vectR4 q)\n{\n const Eigen::Map< const Vector4d > pp( p.getdata() );\n const Eigen::Map< const Vector4d > qq( q.getdata() );\n double innerProduct = pp.adjoint() * qq;\n return innerProduct;\n}\n\n//=====================================\n//! \\brief seqR4\n//=====================================\n//-----------------------------------------------------------------------------\n//! \\brief constructor initializing seqR1 to 0\n//-----------------------------------------------------------------------------\nseqR4::seqR4(long M)\n{\n N=M;\n x = new vectR4[N];\n clear();\n}\n\n//-----------------------------------------------------------------------------\n//! 
\\brief constructor initializing seqR1 to \n//-----------------------------------------------------------------------------\nseqR4::seqR4( long M, double u )\n{\n N=M;\n x = new vectR4[N];\n fill( u );\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief Fill the seqR3 with a value 0\n//-----------------------------------------------------------------------------\nvoid seqR4::clear(void)\n{\n fill( 0.0 );\n}\n\n//-----------------------------------------------------------------------------\n//! \\brief fill the seqR4 with a value \n//-----------------------------------------------------------------------------\nvoid seqR4::fill(double u)\n{\n for(long n=0; n0){\n printf(\"%s\",str1);\n if(fptr!=NULL)fprintf(fptr,\"%s\",str1);\n }\n for(n=start,m=1; n<=end; n++,m++){\n p=x[n];\n printf(\"(%5.2lf,%5.2lf,%5.2lf,%5.2lf) \",p.get1(),p.get2(),p.get3(),p.get4());\n if(m%2==0)printf(\"\\n\");\n if(fptr!=NULL){\n fprintf(fptr,\"(%5.2lf,%5.2lf,%5.2lf,%5.2lf) \",p.get1(),p.get2(),p.get3(),p.get4());\n if(m%2==0)fprintf(fptr,\"\\n\");\n }\n }\n if(strlen(str2)>0){\n printf(\"%s\",str2);\n if(fptr!=NULL)fprintf(fptr,\"%s\",str2);\n }\n }\n\n//-----------------------------------------------------------------------------\n//! 
\\brief list contents of seqR1 using 1 digit per element\n//-----------------------------------------------------------------------------\nvoid seqR4::list1(const long start, const long end, const char *str1, const char *str2,FILE *fptr){\n long n,m;\n vectR4 p;\n if(strlen(str1)>0){\n printf(\"%s\",str1);\n if(fptr!=NULL)fprintf(fptr,\"%s\",str1);\n }\n for(n=start,m=1; n<=end; n++,m++){\n p=x[n];\n printf(\" %1.0lf%1.0lf%1.0lf%1.0lf\",p.get1(),p.get2(),p.get3(),p.get4());\n if(fptr!=NULL)fprintf(fptr,\" %1.0lf%1.0lf%1.0lf%1.0lf\",p.get1(),p.get2(),p.get3(),p.get4());\n if(m%10==0)printf(\"\\n\");\n if(fptr!=NULL)if(m%10==0)fprintf(fptr,\"\\n\");\n }\n if(strlen(str2)>0){\n printf(\"%s\",str2);\n if(fptr!=NULL)fprintf(fptr,\"%s\",str2);\n }\n }\n\n/*=====================================\n//! \\brief external operations\n *=====================================*/\n//-----------------------------------------------------------------------------\n//! \\brief operator: return p+q\n//-----------------------------------------------------------------------------\nvectR4 operator+(vectR4 p, vectR4 q){\n int i;\n vectR4 y;\n for(i=0;i<4;i++)y.put(i,p.get(i)+q.get(i));\n return y;\n }\n\n//-----------------------------------------------------------------------------\n//! \\brief operator: return p-q\n//-----------------------------------------------------------------------------\nvectR4 operator-(vectR4 p, vectR4 q){\n int i;\n vectR4 y;\n for(i=0;i<4;i++)y.put(i,p.get(i)-q.get(i));\n return y;\n }\n\n//-----------------------------------------------------------------------------\n//! \\brief operator: return -p\n//-----------------------------------------------------------------------------\nvectR4 operator-(vectR4 p){\n vectR4 q;\n int i;\n for(i=0;i<4;i++)q.put(i,-p.get(i));\n return q;\n }\n\n//-----------------------------------------------------------------------------\n//! \\brief return the angle theta in radians between the two vectors induced by\n//! 
\\brief the points

and in the space R^4.\n//! \\brief on SUCCESS return theta in the closed interval [0:PI]\n//! \\brief on ERROR return negative value or exit with value EXIT_FAILURE\n//-----------------------------------------------------------------------------\ndouble pqtheta(const vectR4 p, const vectR4 q){\n const double rp = p.r();\n const double rq = q.r();\n double y,theta;\n if(rp==0) return -1;\n if(rq==0) return -2;\n y = (p^q)/(rp*rq);\n if(y>+1) {fprintf(stderr,\"\\nERROR using pqtheta(vectR4 p, vectR4 q): (p^q)/(rp*rq)=%lf>+1\\n\",y); exit(EXIT_FAILURE);}\n if(y<-1) {fprintf(stderr,\"\\nERROR using pqtheta(vectR4 p, vectR4 q): (p^q)/(rp*rq)=%lf<-1\\n\",y); exit(EXIT_FAILURE);}\n theta = acos(y);\n return theta;\n }\n\n/*=====================================\n//! \\brief external operations\n *=====================================*/\n//-----------------------------------------------------------------------------\n//! \\brief compute magnitude of R^1 sequence\n//-----------------------------------------------------------------------------\n//int mag(seqR4 *xR4, seqR1 *ymag){\n// const long Nx=xR4->getN();\n// const long Ny=ymag->getN();\n// long n;\n// int retval=0;\n// vectR4 u;\n// ymag->clear();\n// if(Nx!=Ny){\n// fprintf(stderr,\"ERROR using y=mag(xR4): lengths of xR4 (%ld) and ymag (%ld) differ.\\n\",Nx,Ny);\n// exit(EXIT_FAILURE);\n// }\n// for(n=0;nget(n);\n// ymag->put(n,u.mag());\n// }\n// return retval;\n// }\n\n", "meta": {"hexsha": "a62159806f2404112de19cc2bdf0f9788952fbca", "size": 12895, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "r4.cpp", "max_stars_repo_name": "dgreenhoe/symbolic-sequence-processing", "max_stars_repo_head_hexsha": "8e9f5a40dbddf44fd0fde0a461d7ed73208c9598", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "r4.cpp", "max_issues_repo_name": "dgreenhoe/symbolic-sequence-processing", 
"max_issues_repo_head_hexsha": "8e9f5a40dbddf44fd0fde0a461d7ed73208c9598", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "r4.cpp", "max_forks_repo_name": "dgreenhoe/symbolic-sequence-processing", "max_forks_repo_head_hexsha": "8e9f5a40dbddf44fd0fde0a461d7ed73208c9598", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2227602906, "max_line_length": 120, "alphanum_fraction": 0.3499806126, "num_tokens": 2909, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7662936324115011, "lm_q2_score": 0.6513548646660543, "lm_q1q2_score": 0.4991290852338524}} {"text": "/* Copyright (c) 2010-2019, Delft University of Technology\r\n * All rigths reserved\r\n *\r\n * This file is part of the Tudat. Redistribution and use in source and\r\n * binary forms, with or without modification, are permitted exclusively\r\n * under the terms of the Modified BSD license. You should have received\r\n * a copy of the license with this file. 
If not, please or visit:\r\n * http://tudat.tudelft.nl/LICENSE.\r\n */\r\n\r\n\r\n#define BOOST_TEST_MAIN\r\n\r\n#include \r\n#include \r\n#include \r\n\r\n#include \"Tudat/Mathematics/BasicMathematics/leastSquaresEstimation.h\"\r\n#include \"Tudat/External/SpiceInterface/spiceInterface.h\"\r\n#include \"Tudat/SimulationSetup/tudatSimulationHeader.h\"\r\n#include \"Tudat/Mathematics/Statistics/basicStatistics.h\"\r\nnamespace tudat\r\n{\r\n\r\nnamespace unit_tests\r\n{\r\n\r\nusing namespace tudat::simulation_setup;\r\nusing namespace tudat::propagators;\r\nusing namespace tudat::numerical_integrators;\r\nusing namespace tudat::orbital_element_conversions;\r\nusing namespace tudat::basic_mathematics;\r\nusing namespace tudat::unit_conversions;\r\n\r\ndouble computeLenseThirringPericenterPrecession(\r\n const double gravitationalParameter,\r\n const double angularMomentum,\r\n const double semiMajorAxis,\r\n const double eccentricity,\r\n const double inclination )\r\n{\r\n return - 6.0 * gravitationalParameter * angularMomentum * std::cos(inclination ) /\r\n ( physical_constants::SPEED_OF_LIGHT * physical_constants::SPEED_OF_LIGHT * semiMajorAxis * semiMajorAxis * semiMajorAxis *\r\n std::pow( 1.0 - eccentricity * eccentricity, 1.5 ) );\r\n}\r\n\r\ndouble computeLenseThirringNodePrecession(\r\n const double gravitationalParameter,\r\n const double angularMomentum,\r\n const double semiMajorAxis,\r\n const double eccentricity )\r\n{\r\n return 2.0 * gravitationalParameter * angularMomentum /\r\n ( physical_constants::SPEED_OF_LIGHT * physical_constants::SPEED_OF_LIGHT * semiMajorAxis * semiMajorAxis * semiMajorAxis *\r\n std::pow( 1.0 - eccentricity * eccentricity, 1.5 ) );\r\n}\r\n\r\ndouble computeSchwarzschildPericenterPrecession(\r\n const double gravitationalParameter,\r\n const double semiMajorAxis,\r\n const double eccentricity )\r\n{\r\n return 3.0 * std::pow( gravitationalParameter, 1.5 ) /\r\n ( physical_constants::SPEED_OF_LIGHT * 
physical_constants::SPEED_OF_LIGHT * std::pow(\r\n semiMajorAxis, 2.5 ) *( 1.0 - eccentricity * eccentricity ) );\r\n}\r\n\r\ndouble computeDeSitterPericenterPrecession( const double meanDistanceEarthToSun,\r\n const double meanEccentricity )\r\n{\r\n return 1.5 * 1.327124E20 / ( physical_constants::SPEED_OF_LIGHT * physical_constants::SPEED_OF_LIGHT * meanDistanceEarthToSun ) * 2.0 * mathematical_constants::PI /\r\n ( physical_constants::JULIAN_YEAR ) * std::sqrt( 1.0 - meanEccentricity * meanEccentricity );\r\n}\r\n\r\nBOOST_AUTO_TEST_SUITE( test_relativistic_acceleration_corrections )\r\n\r\nvoid testControlPropagation(\r\n Eigen::Vector6d asterixInitialStateInKeplerianElements,\r\n std::vector< std::map< double, double > > elementMaps,\r\n double earthGravitationalParameter )\r\n{\r\n std::vector< double > polynomialPowers = { 0, 1 };\r\n for( unsigned elementIndex = 0; elementIndex < 5; elementIndex++ )\r\n {\r\n std::vector< double > fitOutput = linear_algebra::getLeastSquaresPolynomialFit(\r\n elementMaps[ elementIndex ], polynomialPowers );\r\n BOOST_CHECK_CLOSE_FRACTION( asterixInitialStateInKeplerianElements( elementIndex ), fitOutput.at( 0 ), 1.0E-10 );\r\n if( elementIndex == 1 )\r\n {\r\n BOOST_CHECK_SMALL( fitOutput.at( 1 ), 1.0E-18 );\r\n }\r\n else\r\n {\r\n BOOST_CHECK_SMALL( fitOutput.at( 1 ), 1.0E-12 );\r\n }\r\n }\r\n}\r\n\r\nvoid testLenseThirringPropagation(\r\n Eigen::Vector6d asterixInitialStateInKeplerianElements,\r\n std::vector< std::map< double, double > > elementMaps,\r\n double earthGravitationalParameter )\r\n{\r\n double theoreticalLenseThirringPericenterPrecession =\r\n computeLenseThirringPericenterPrecession(\r\n earthGravitationalParameter, 1.0E9, asterixInitialStateInKeplerianElements( semiMajorAxisIndex ),\r\n asterixInitialStateInKeplerianElements( eccentricityIndex ),\r\n asterixInitialStateInKeplerianElements( inclinationIndex ) );\r\n double theoreticalLenseThirringNodePrecession =\r\n 
computeLenseThirringNodePrecession(\r\n earthGravitationalParameter, 1.0E9, asterixInitialStateInKeplerianElements( semiMajorAxisIndex ),\r\n asterixInitialStateInKeplerianElements( eccentricityIndex ) );\r\n\r\n std::vector< double > polynomialPowers = { 0, 1 };\r\n for( unsigned elementIndex = 0; elementIndex < 5; elementIndex++ )\r\n {\r\n std::vector< double > fitOutput = linear_algebra::getLeastSquaresPolynomialFit(\r\n elementMaps[ elementIndex ], polynomialPowers );\r\n BOOST_CHECK_CLOSE_FRACTION( asterixInitialStateInKeplerianElements( elementIndex ), fitOutput.at( 0 ), 1.0E-10 );\r\n if( elementIndex == 1 )\r\n {\r\n BOOST_CHECK_SMALL( fitOutput.at( 1 ), 1.0E-18 );\r\n }\r\n else if( elementIndex == 3 )\r\n {\r\n BOOST_CHECK_CLOSE_FRACTION( fitOutput.at( 1 ), theoreticalLenseThirringPericenterPrecession, 1.0E-5 );\r\n }\r\n else if( elementIndex == 4 )\r\n {\r\n BOOST_CHECK_CLOSE_FRACTION( fitOutput.at( 1 ), theoreticalLenseThirringNodePrecession, 1.0E-5 );\r\n }\r\n else\r\n {\r\n BOOST_CHECK_SMALL( fitOutput.at( 1 ), 1.0E-12 );\r\n }\r\n }\r\n}\r\n\r\nvoid testSchwarzschildPropagation(\r\n Eigen::Vector6d asterixInitialStateInKeplerianElements,\r\n std::vector< std::map< double, double > > elementMaps,\r\n double earthGravitationalParameter )\r\n{\r\n double theoreticalSchwarzschildPericenterPrecession =\r\n computeSchwarzschildPericenterPrecession(\r\n earthGravitationalParameter, asterixInitialStateInKeplerianElements( semiMajorAxisIndex ),\r\n asterixInitialStateInKeplerianElements( eccentricityIndex ) );\r\n\r\n std::vector< double > polynomialPowers = { 0, 1 };\r\n for( unsigned elementIndex = 0; elementIndex < 5; elementIndex++ )\r\n {\r\n std::vector< double > fitOutput = linear_algebra::getLeastSquaresPolynomialFit(\r\n elementMaps[ elementIndex ], polynomialPowers );\r\n if( elementIndex != 1 )\r\n {\r\n BOOST_CHECK_CLOSE_FRACTION( asterixInitialStateInKeplerianElements( elementIndex ), fitOutput.at( 0 ), 1.0E-8 );\r\n }\r\n else\r\n {\r\n 
BOOST_CHECK_CLOSE_FRACTION( asterixInitialStateInKeplerianElements( elementIndex ), fitOutput.at( 0 ), 1.0E-7 );\r\n }\r\n if( elementIndex == 1 )\r\n {\r\n BOOST_CHECK_SMALL( fitOutput.at( 1 ), 1.0E-16 );\r\n }\r\n else if( elementIndex == 3 )\r\n {\r\n BOOST_CHECK_CLOSE_FRACTION( fitOutput.at( 1 ), theoreticalSchwarzschildPericenterPrecession, 1.0E-5 );\r\n }\r\n else\r\n {\r\n BOOST_CHECK_SMALL( fitOutput.at( 1 ), 1.0E-10 );\r\n }\r\n }\r\n}\r\n\r\nvoid testDeSitterPropagation(\r\n Eigen::Vector6d asterixInitialStateInKeplerianElements,\r\n std::vector< std::map< double, double > > elementMaps,\r\n double meanDistanceEarthToSun,\r\n double meanEarthEccentricity )\r\n{\r\n std::vector< double > polynomialPowers = { 0, 1 };\r\n for( unsigned elementIndex = 0; elementIndex < 5; elementIndex++ )\r\n {\r\n std::vector< double > fitOutput = linear_algebra::getLeastSquaresPolynomialFit(\r\n elementMaps[ elementIndex ], polynomialPowers );\r\n if( elementIndex != 4 )\r\n {\r\n BOOST_CHECK_CLOSE_FRACTION( asterixInitialStateInKeplerianElements( elementIndex ), fitOutput.at( 0 ), 1.0E-10 );\r\n }\r\n else\r\n {\r\n BOOST_CHECK_CLOSE_FRACTION( asterixInitialStateInKeplerianElements( elementIndex ), fitOutput.at( 0 ), 1.0E-8 );\r\n }\r\n if( elementIndex == 1 )\r\n {\r\n BOOST_CHECK_SMALL( fitOutput.at( 1 ), 1.0E-18 );\r\n }\r\n else if( elementIndex == 4 )\r\n {\r\n BOOST_CHECK_CLOSE_FRACTION( fitOutput.at( 1 ), computeDeSitterPericenterPrecession(\r\n meanDistanceEarthToSun, meanEarthEccentricity ), 2.5E-2 );\r\n }\r\n else\r\n {\r\n BOOST_CHECK_SMALL( fitOutput.at( 1 ), 1.0E-12 );\r\n }\r\n }\r\n}\r\nBOOST_AUTO_TEST_CASE( testLenseThirring )\r\n{\r\n // Load Spice kernels.\r\n spice_interface::loadStandardSpiceKernels( );\r\n\r\n // Set simulation end epoch.\r\n const double simulationEndEpoch = 0.25 * tudat::physical_constants::JULIAN_YEAR;\r\n\r\n\r\n // Create body objects.\r\n std::vector< std::string > bodiesToCreate;\r\n bodiesToCreate.push_back( \"Earth\" );\r\n 
bodiesToCreate.push_back( \"Sun\" );\r\n\r\n std::map< std::string, std::shared_ptr< BodySettings > > bodySettings =\r\n getDefaultBodySettings( bodiesToCreate );\r\n\r\n // Create Earth object\r\n NamedBodyMap bodyMap = createBodies( bodySettings );\r\n\r\n // Create spacecraft object.\r\n bodyMap[ \"Asterix\" ] = std::make_shared< simulation_setup::Body >( );\r\n\r\n // Finalize body creation.\r\n setGlobalFrameBodyEphemerides( bodyMap, \"SSB\", \"ECLIPJ2000\" );\r\n\r\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\r\n /////////////////////// CREATE ACCELERATIONS //////////////////////////////////////////////////////\r\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\r\n\r\n for( unsigned int testCase = 0; testCase < 4; testCase++ )\r\n {\r\n // Define propagator settings variables.\r\n SelectedAccelerationMap accelerationMap;\r\n std::vector< std::string > bodiesToPropagate;\r\n std::vector< std::string > centralBodies;\r\n\r\n bodiesToPropagate.push_back( \"Asterix\" );\r\n centralBodies.push_back( \"Earth\" );\r\n\r\n // Define propagation settings.\r\n std::map< std::string, std::vector< std::shared_ptr< AccelerationSettings > > > accelerationsOfAsterix;\r\n accelerationsOfAsterix[ \"Earth\" ].push_back( std::make_shared< AccelerationSettings >(\r\n basic_astrodynamics::central_gravity ) );\r\n if( testCase == 1 )\r\n {\r\n accelerationsOfAsterix[ \"Earth\" ].push_back( std::make_shared< RelativisticAccelerationCorrectionSettings >(\r\n false, true, false, \"\", 1.0E9 * Eigen::Vector3d::UnitZ( ) ) );\r\n }\r\n if( testCase == 2 )\r\n {\r\n accelerationsOfAsterix[ \"Earth\" ].push_back( std::make_shared< RelativisticAccelerationCorrectionSettings >(\r\n true, false, false ) );\r\n }\r\n if( testCase == 3 )\r\n {\r\n accelerationsOfAsterix[ \"Earth\" ].push_back( std::make_shared< 
RelativisticAccelerationCorrectionSettings >(\r\n false, false, true, \"Sun\" ) );\r\n }\r\n accelerationMap[ \"Asterix\" ] = accelerationsOfAsterix;\r\n\r\n // Create acceleration models and propagation settings.\r\n basic_astrodynamics::AccelerationMap accelerationModelMap = createAccelerationModelsMap(\r\n bodyMap, accelerationMap, bodiesToPropagate, centralBodies );\r\n\r\n\r\n\r\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\r\n /////////////////////// CREATE PROPAGATION SETTINGS ////////////////////////////////////////////\r\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\r\n\r\n // Set initial conditions for the Asterix satellite that will be propagated in this simulation.\r\n // The initial conditions are given in Keplerian elements and later on converted to Cartesian\r\n // elements.\r\n\r\n // Set Keplerian elements for Asterix.\r\n Eigen::Vector6d asterixInitialStateInKeplerianElements;\r\n asterixInitialStateInKeplerianElements( semiMajorAxisIndex ) = 5000.0E3;\r\n asterixInitialStateInKeplerianElements( eccentricityIndex ) = 0.2;\r\n asterixInitialStateInKeplerianElements( inclinationIndex ) = convertDegreesToRadians( 65.3 );\r\n asterixInitialStateInKeplerianElements( argumentOfPeriapsisIndex )\r\n = convertDegreesToRadians( 235.7 );\r\n asterixInitialStateInKeplerianElements( longitudeOfAscendingNodeIndex )\r\n = convertDegreesToRadians( 23.4 );\r\n asterixInitialStateInKeplerianElements( trueAnomalyIndex ) = convertDegreesToRadians( 0.0 );\r\n\r\n // Convert Asterix state from Keplerian elements to Cartesian elements.\r\n double earthGravitationalParameter = bodyMap.at( \"Earth\" )->getGravityFieldModel( )->getGravitationalParameter( );\r\n Eigen::VectorXd systemInitialState = convertKeplerianToCartesianElements(\r\n asterixInitialStateInKeplerianElements,\r\n earthGravitationalParameter );\r\n\r\n 
std::shared_ptr< TranslationalStatePropagatorSettings< double > > propagatorSettings =\r\n std::make_shared< TranslationalStatePropagatorSettings< double > >\r\n ( centralBodies, accelerationModelMap, bodiesToPropagate, systemInitialState, simulationEndEpoch, encke );\r\n\r\n\r\n // Create numerical integrator.\r\n std::shared_ptr< IntegratorSettings< > > integratorSettings =\r\n std::make_shared< RungeKuttaVariableStepSizeSettings< > >\r\n ( 0.0, 10.0,\r\n RungeKuttaCoefficients::rungeKuttaFehlberg78, 1.0E-3, 1.0E3, 1.0E-12, 1.0E-12 );\r\n\r\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\r\n /////////////////////// PROPAGATE ORBIT ////////////////////////////////////////////////////////\r\n ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\r\n\r\n // Create simulation object and propagate dynamics.\r\n SingleArcDynamicsSimulator< > dynamicsSimulator(\r\n bodyMap, integratorSettings, propagatorSettings );\r\n std::map< double, Eigen::VectorXd > integrationResult = dynamicsSimulator.getEquationsOfMotionNumericalSolution( );\r\n std::map< double, Eigen::VectorXd > keplerianIntegrationResult;\r\n\r\n // Compute map of Kepler elements\r\n Eigen::Vector6d currentCartesianState;\r\n std::vector< std::map< double, double > > elementMaps;\r\n elementMaps.resize( 6 );\r\n\r\n std::vector< double > solarDistances;\r\n std::vector< double > earthSemiMajorAxes;\r\n std::vector< double > earthEccentricities;\r\n\r\n Eigen::Vector6d earthKeplerianState;\r\n Eigen::Vector6d earthCartesianState;\r\n\r\n for( std::map< double, Eigen::VectorXd >::const_iterator stateIterator = integrationResult.begin( );\r\n stateIterator != integrationResult.end( ); stateIterator++ )\r\n {\r\n // Retrieve current Cartesian state (convert to Moon-centered frame if needed)\r\n currentCartesianState = stateIterator->second;\r\n keplerianIntegrationResult[ 
stateIterator->first ] =\r\n convertCartesianToKeplerianElements(\r\n currentCartesianState, earthGravitationalParameter );\r\n for( unsigned elementIndex = 0; elementIndex < 6; elementIndex++ )\r\n {\r\n elementMaps[ elementIndex ][ stateIterator->first ] = keplerianIntegrationResult[ stateIterator->first ]( elementIndex );\r\n }\r\n\r\n if( testCase == 3 )\r\n {\r\n earthCartesianState = spice_interface:: getBodyCartesianStateAtEpoch(\r\n \"Earth\", \"Sun\", \"ECLIPJ2000\", \"None\", stateIterator->first );\r\n earthKeplerianState = convertCartesianToKeplerianElements(\r\n earthCartesianState, spice_interface::getBodyGravitationalParameter( \"Sun\" ) );\r\n\r\n earthSemiMajorAxes.push_back( earthKeplerianState( 0 ) );\r\n earthEccentricities.push_back( earthKeplerianState( 1 ) );\r\n\r\n solarDistances.push_back( earthCartesianState.segment( 0, 3 ).norm( ) );\r\n }\r\n }\r\n\r\n if( testCase == 0 )\r\n {\r\n testControlPropagation( asterixInitialStateInKeplerianElements, elementMaps, earthGravitationalParameter );\r\n }\r\n else if( testCase == 1 )\r\n {\r\n testLenseThirringPropagation( asterixInitialStateInKeplerianElements, elementMaps, earthGravitationalParameter );\r\n }\r\n else if( testCase == 2 )\r\n {\r\n testSchwarzschildPropagation( asterixInitialStateInKeplerianElements, elementMaps, earthGravitationalParameter );\r\n }\r\n else if( testCase == 3 )\r\n {\r\n testDeSitterPropagation(\r\n asterixInitialStateInKeplerianElements, elementMaps, statistics::computeSampleMean( solarDistances ),\r\n statistics::computeSampleMean( earthEccentricities ) );\r\n }\r\n }\r\n}\r\n\r\nBOOST_AUTO_TEST_SUITE_END( )\r\n\r\n}\r\n\r\n}\r\n", "meta": {"hexsha": "43a859098c06b424dd0c7b3f6d67c52363f801e1", "size": 17838, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Tudat/Astrodynamics/Relativity/UnitTests/unitTestRelativisticAccelerationCorrection.cpp", "max_stars_repo_name": "sebranchett/tudat", "max_stars_repo_head_hexsha": 
"24e5f3cc85c250fcbed0aac37f026c1dd7fd6c44", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Tudat/Astrodynamics/Relativity/UnitTests/unitTestRelativisticAccelerationCorrection.cpp", "max_issues_repo_name": "sebranchett/tudat", "max_issues_repo_head_hexsha": "24e5f3cc85c250fcbed0aac37f026c1dd7fd6c44", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tudat/Astrodynamics/Relativity/UnitTests/unitTestRelativisticAccelerationCorrection.cpp", "max_forks_repo_name": "sebranchett/tudat", "max_forks_repo_head_hexsha": "24e5f3cc85c250fcbed0aac37f026c1dd7fd6c44", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.8560411311, "max_line_length": 169, "alphanum_fraction": 0.6023657361, "num_tokens": 3934, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7879311956428947, "lm_q2_score": 0.63341027751814, "lm_q1q2_score": 0.49908371729736584}} {"text": "// $Id$\n//\n// Copyright (C) 2003-2009 Greg Landrum and Rational Discovery LLC\n//\n// @@ All Rights Reserved @@\n// This file is part of the RDKit.\n// The contents are covered by the terms of the BSD license\n// which is included in the file license.txt, found at the root\n// of the RDKit source tree.\n//\n#include \"MolOps.h\"\n#include \"RDKitBase.h\"\n#include \n#include \n\n#include \n#include \n\nnamespace RDKit {\nnamespace MolOps {\ndouble computeBalabanJ(double *distMat, int nb, int nAts) {\n // NOTE that the distance matrix is modified here for the sake of\n // efficiency\n PRECONDITION(distMat, \"bogus distance matrix\")\n double sum = 0.0;\n int nActive = nAts;\n int mu = nb - nActive + 1;\n\n if (mu == -1) return 0.0;\n\n for (int i = 0; i < nAts; i++) {\n int iTab = i * nAts;\n sum = 0.0;\n for (int j = 0; j < nAts; j++) {\n if (j != i) {\n sum += distMat[iTab + j];\n }\n }\n distMat[iTab + i] *= sum;\n }\n double accum = 0.0;\n for (int i = 0; i < nAts; i++) {\n int iTab = i * nAts + i;\n for (int j = i + 1; j < nAts; j++) {\n // NOTE: this isn't strictly the Balaban J value, because we\n // aren't only adding in adjacent atoms. 
Since we're doing a\n // discriminator, that shouldn't be a problem.\n if (j != i) {\n accum += (1.0 / sqrt(distMat[iTab] * distMat[j * nAts + j]));\n }\n }\n }\n return nActive / ((mu + 1) * accum);\n}\n\ndouble computeBalabanJ(const ROMol &mol, bool useBO, bool force,\n const std::vector *bondPath, bool cacheIt) {\n RDUNUSED_PARAM(useBO);\n double res = 0.0;\n if (!force && mol.hasProp(common_properties::BalabanJ)) {\n mol.getProp(common_properties::BalabanJ, res);\n } else {\n double *dMat;\n int nb = 0, nAts = 0;\n if (bondPath) {\n boost::dynamic_bitset<> atomsUsed(mol.getNumAtoms());\n boost::dynamic_bitset<> bondsUsed(mol.getNumBonds());\n for (int ci : *bondPath) {\n bondsUsed[ci] = 1;\n }\n std::vector bonds;\n bonds.reserve(bondPath->size());\n std::vector atomsInPath;\n atomsInPath.reserve(bondPath->size() + 1);\n\n ROMol::EDGE_ITER beg, end;\n boost::tie(beg, end) = mol.getEdges();\n while (beg != end) {\n const Bond *bond = mol[*beg];\n if (bondsUsed[bond->getIdx()]) {\n int begIdx = bond->getBeginAtomIdx();\n int endIdx = bond->getEndAtomIdx();\n bonds.push_back(bond);\n if (!atomsUsed[begIdx]) {\n atomsInPath.push_back(begIdx);\n atomsUsed[begIdx] = 1;\n }\n if (!atomsUsed[endIdx]) {\n atomsInPath.push_back(endIdx);\n atomsUsed[endIdx] = 1;\n }\n }\n beg++;\n }\n nb = rdcast(bondPath->size());\n nAts = rdcast(atomsInPath.size());\n dMat = MolOps::getDistanceMat(mol, atomsInPath, bonds, true, true);\n res = computeBalabanJ(dMat, nb, nAts);\n delete[] dMat;\n } else {\n nb = mol.getNumBonds();\n nAts = mol.getNumAtoms();\n dMat = MolOps::getDistanceMat(mol, true, true, true, nullptr);\n res = computeBalabanJ(dMat, nb, nAts);\n delete[] dMat;\n }\n\n if (cacheIt) mol.setProp(common_properties::BalabanJ, res, true);\n }\n return res;\n}\n} // end of namespace MolOps\n} // end of namespace RDKit\n", "meta": {"hexsha": "79594739693b2ad513693f030f07df38114606da", "size": 3391, "ext": "cpp", "lang": "C++", "max_stars_repo_path": 
"modified_rdkit/Code/GraphMol/MolDiscriminators.cpp", "max_stars_repo_name": "hjuinj/RDKit_mETKDG", "max_stars_repo_head_hexsha": "b270e765caa61d289e9e33595d4264b156f9062e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2020-03-30T04:00:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-31T01:32:13.000Z", "max_issues_repo_path": "modified_rdkit/Code/GraphMol/MolDiscriminators.cpp", "max_issues_repo_name": "hjuinj/RDKit_mETKDG", "max_issues_repo_head_hexsha": "b270e765caa61d289e9e33595d4264b156f9062e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-05-23T17:31:04.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-26T06:52:47.000Z", "max_forks_repo_path": "modified_rdkit/Code/GraphMol/MolDiscriminators.cpp", "max_forks_repo_name": "hjuinj/RDKit_mETKDG", "max_forks_repo_head_hexsha": "b270e765caa61d289e9e33595d4264b156f9062e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2020-03-30T04:00:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-25T23:11:52.000Z", "avg_line_length": 29.7456140351, "max_line_length": 73, "alphanum_fraction": 0.5818342672, "num_tokens": 1024, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7879311956428946, "lm_q2_score": 0.63341027059799, "lm_q1q2_score": 0.4990837118447637}} {"text": "//==================================================================================================\n/*!\n @file\n\n @copyright 2016 NumScale SAS\n @copyright 2016 J.T. 
Lapreste\n\n Distributed under the Boost Software License, Version 1.0.\n (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)\n*/\n//==================================================================================================\n#ifndef BOOST_SIMD_FUNCTION_SIGNIFICANTS_HPP_INCLUDED\n#define BOOST_SIMD_FUNCTION_SIGNIFICANTS_HPP_INCLUDED\n\n#if defined(DOXYGEN_ONLY)\nnamespace boost { namespace simd\n{\n\n /*!\n\n @ingroup group-exponential\n Function object implementing significants capabilities\n\n Compute the rounding to n significants digits\n\n @par Semantic:\n\n For every parameter of floating type T and strictly positive integer n\n\n @code\n T r = significants(x, n);\n @endcode\n\n is equivalent to round(x, m) where m is n-iceil(log10(abs(x)))\n\n @see round, iceil, log10\n\n **/\n const boost::dispatch::functor significants = {};\n} }\n#endif\n\n#include \n#include \n\n#endif\n", "meta": {"hexsha": "285dfb97b81b88024e3575dfe463cf5154576fc4", "size": 1198, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/simd/function/significants.hpp", "max_stars_repo_name": "yaeldarmon/boost.simd", "max_stars_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/simd/function/significants.hpp", "max_issues_repo_name": "yaeldarmon/boost.simd", "max_issues_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/simd/function/significants.hpp", "max_forks_repo_name": "yaeldarmon/boost.simd", "max_forks_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_forks_repo_licenses": ["BSL-1.0"], 
"max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4893617021, "max_line_length": 100, "alphanum_fraction": 0.6118530885, "num_tokens": 255, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7879311956428946, "lm_q2_score": 0.63341027059799, "lm_q1q2_score": 0.4990837118447637}} {"text": "//=======================================================================\r\n// Copyright 2001 Jeremy G. Siek, Andrew Lumsdaine, Lie-Quan Lee, \r\n//\r\n// Distributed under the Boost Software License, Version 1.0. (See\r\n// accompanying file LICENSE_1_0.txt or copy at\r\n// http://www.boost.org/LICENSE_1_0.txt)\r\n//=======================================================================\r\n\r\n#ifdef BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION\r\n#error The vector_as_graph.hpp header requires partial specialization\r\n#endif\r\n\r\n#include \r\n#include \r\n#include // needed by graph_utility. 
-Jeremy\r\n#include \r\n#include \r\n\r\nint\r\nmain()\r\n{\r\n enum\r\n { r, s, t, u, v, w, x, y, N };\r\n char name[] = \"rstuvwxy\";\r\n typedef std::vector < std::list < int > > Graph;\r\n Graph g(N);\r\n g[r].push_back(v);\r\n g[s].push_back(r);\r\n g[s].push_back(r);\r\n g[s].push_back(w);\r\n g[t].push_back(x);\r\n g[u].push_back(t);\r\n g[w].push_back(t);\r\n g[w].push_back(x);\r\n g[x].push_back(y);\r\n g[y].push_back(u);\r\n boost::print_graph(g, name);\r\n return 0;\r\n}\r\n", "meta": {"hexsha": "c987d188a3fd0208bfdf4f902fce7aca08a29c70", "size": 1116, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/graph/example/vector-as-graph.cpp", "max_stars_repo_name": "zyiacas/boost-doc-zh", "max_stars_repo_head_hexsha": "689e5a3a0a4dbead1a960f7b039e3decda54aa2c", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 198.0, "max_stars_repo_stars_event_min_datetime": "2015-01-13T05:47:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T04:46:46.000Z", "max_issues_repo_path": "libs/graph/example/vector-as-graph.cpp", "max_issues_repo_name": "sdfict/boost-doc-zh", "max_issues_repo_head_hexsha": "689e5a3a0a4dbead1a960f7b039e3decda54aa2c", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 9.0, "max_issues_repo_issues_event_min_datetime": "2015-01-28T16:33:19.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-12T23:03:28.000Z", "max_forks_repo_path": "libs/graph/example/vector-as-graph.cpp", "max_forks_repo_name": "sdfict/boost-doc-zh", "max_forks_repo_head_hexsha": "689e5a3a0a4dbead1a960f7b039e3decda54aa2c", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 139.0, "max_forks_repo_forks_event_min_datetime": "2015-01-15T20:09:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T15:21:16.000Z", "avg_line_length": 27.9, "max_line_length": 74, "alphanum_fraction": 0.5681003584, "num_tokens": 289, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. Yes\n2. 
Yes", "lm_q1_score": 0.6334102636778403, "lm_q2_score": 0.7879312031126512, "lm_q1q2_score": 0.4990837111235823}} {"text": "#include \"smooth_pair_finder.h\"\n\n#include \n\n#include \"norm_finding.h\"\n#include \"util.h\"\n\nnamespace {\n using namespace NTL;\n}\n\nnamespace gnfs {\n SmoothPairFinder::SmoothPairFinder(const ZZ& n, long B, const ZZX& f, const ZZ& m)\n : n_(n), B_(B), f_(f), m_(m), M_(B), Mold_(0), a_(B), b_(-1) {\n ZZX fprime = diff(f);\n\n // Figure out what the factorbase columns represent.\n // Each one is a pair (p,r), where p is a prime up to B\n // and r is something where f(r) \\equiv 0 (mod p).\n PrimeSeq pseq;\n long p;\n while ((p = pseq.next()) <= B) {\n cols_modular_.push_back(p);\n for (long r = 0; r < p; r++) {\n ZZ res = eval(f, ZZ(r));\n if (res % p == 0)\n cols_algebraic_.emplace_back(p, r);\n }\n }\n\n // Generate pairs (q,s) for Adleman columns.\n // For each q (named p here), find an s where\n // f(s) \\equiv 0 (mod p) and f'(s) \\not\\equiv 0 (mod p).\n long k = conv(3 * log(conv(n)));\n long k_so_far = 0;\n while (k_so_far < k) {\n for (long s = 0; s < p; s++) {\n ZZ zs(s);\n if (eval(f, zs) % p == 0 && eval(fprime, zs) % p != 0) {\n cols_adleman_.emplace_back(p, s);\n k_so_far++;\n }\n }\n\n p = pseq.next();\n }\n }\n\n bool SmoothPairFinder::add_cols_modular(long a, long b, vec_GF2& ret) {\n ZZ ammb = a - m_ * b;\n ammb %= n_;\n\n if (ammb < 0) {\n ammb *= -1;\n ret.append(GF2(1));\n } else {\n ret.append(GF2(0));\n }\n\n for (const auto& p : cols_modular_) {\n bool divcount = false;\n while (ammb % p == 0) {\n ammb /= p;\n divcount ^= true;\n }\n ret.append(divcount ? 
GF2(1) : GF2(0));\n }\n\n return ammb == 1;\n }\n\n bool SmoothPairFinder::add_cols_algebraic(long a, long b, vec_GF2& ret) {\n ZZ namab = eval_norm(f_, ZZ(a), ZZ(b));\n\n // Iterate through each prime.\n // For each one, find r first,\n // then set the appropriate entry in the index vector.\n\n // Here, we'll keep a cursor into cols_algebraic_.\n auto alg_it = cols_algebraic_.cbegin();\n\n for (const auto& p : cols_modular_) {\n long r = lincon(a, b, p);\n\n // Get the divcount as before\n bool divcount = false;\n while (namab % p == 0) {\n namab /= p;\n divcount ^= true;\n }\n GF2 to_add = divcount ? GF2(1) : GF2(0);\n\n // Scroll past algebraic factorbase entries\n // until we find where we want to add this item.\n // Then add it.\n while (alg_it->first != p || alg_it->second != r) {\n ++alg_it;\n ret.append(GF2(0));\n }\n ++alg_it;\n ret.append(to_add);\n }\n\n // Add the rest of the zeros to the vector that we return.\n while (alg_it != cols_algebraic_.cend()) {\n ++alg_it;\n ret.append(GF2(0));\n }\n\n return namab == 1;\n }\n\n bool SmoothPairFinder::add_cols_adleman(long a, long b, NTL::vec_GF2& ret) {\n for (const auto& qs : cols_adleman_) {\n long q = qs.first;\n long s = qs.second;\n long jacobi_top = a - b * s;\n long jacobi = Jacobi(ZZ(jacobi_top % q), ZZ(q));\n ret.append(jacobi == -1 ? 
GF2(1) : GF2(0));\n }\n\n return true;\n }\n\n vec_GF2 SmoothPairFinder::generate_row(long a, long b) {\n vec_GF2 ret; // if this length != num_cols, invalid\n // we always return ret so we can have the return value optimzn\n // even if ret ends up being invalid\n\n // Modular factorbase and -1.\n if (!add_cols_modular(a, b, ret)) {\n // ammb is not B-smooth\n // make SURE the length is invalid\n if (ret.length() == num_cols()) ret.append(GF2(0));\n return ret;\n }\n\n // Algebraic factorbase.\n if (!add_cols_algebraic(a, b, ret)) {\n // namab is not B-smooth\n // make SURE the length is invalid\n if (ret.length() == num_cols()) ret.append(GF2(0));\n return ret;\n }\n\n // Adleman columns.\n if (!add_cols_adleman(a, b, ret)) {\n // result is wrong\n // make SURE the length is invalid\n if (ret.length() == num_cols()) ret.append(GF2(0));\n return ret;\n }\n\n return ret;\n }\n\n std::pair, vec_GF2> SmoothPairFinder::get() {\n // TODO\n return std::make_pair(std::make_pair(0, 0), vec_GF2(INIT_SIZE, num_cols()));\n }\n}\n", "meta": {"hexsha": "ae69542a8f4c9e4064c6338e8e08a23ac8a3ea2c", "size": 4873, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/smooth_pair_finder.cc", "max_stars_repo_name": "MathSquared/general-number-field-sieve", "max_stars_repo_head_hexsha": "0ab4efd447f24b726597ec9a6ddae669b1709a20", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-05-25T09:36:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-23T11:54:46.000Z", "max_issues_repo_path": "src/smooth_pair_finder.cc", "max_issues_repo_name": "MathSquared/general-number-field-sieve", "max_issues_repo_head_hexsha": "0ab4efd447f24b726597ec9a6ddae669b1709a20", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-05-06T10:34:07.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-06T10:34:07.000Z", "max_forks_repo_path": "src/smooth_pair_finder.cc", 
"max_forks_repo_name": "MathSquared/general-number-field-sieve", "max_forks_repo_head_hexsha": "0ab4efd447f24b726597ec9a6ddae669b1709a20", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0802469136, "max_line_length": 86, "alphanum_fraction": 0.482864765, "num_tokens": 1321, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7879311956428947, "lm_q2_score": 0.6334102636778401, "lm_q1q2_score": 0.49908370639216176}} {"text": "#include \"stdafx.h\"\n#include \"Math.h\"\n#include \n\nnamespace\n{\nconstexpr double PI = boost::math::constants::pi();\n}\n\ndouble DegreesToRadians(double degrees)\n{\n\treturn degrees * PI / 180.0;\n}\n\ndouble RadiansToDegrees(double radians)\n{\n\treturn radians * 180.0 / PI;\n}\n", "meta": {"hexsha": "964aa6b9587a148696c0f16a470130a42aaa1442", "size": 309, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Observer/WindTracking/Math.cpp", "max_stars_repo_name": "chosti34/ood", "max_stars_repo_head_hexsha": "3d74b5253f667d3de1ee610fb7509cf3015ea79c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Observer/WindTracking/Math.cpp", "max_issues_repo_name": "chosti34/ood", "max_issues_repo_head_hexsha": "3d74b5253f667d3de1ee610fb7509cf3015ea79c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9.0, "max_issues_repo_issues_event_min_datetime": "2018-02-09T06:12:29.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-06T06:26:40.000Z", "max_forks_repo_path": "Observer/WindTracking/Math.cpp", "max_forks_repo_name": "chosti34/ood", "max_forks_repo_head_hexsha": "3d74b5253f667d3de1ee610fb7509cf3015ea79c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, 
"max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.2631578947, "max_line_length": 59, "alphanum_fraction": 0.7216828479, "num_tokens": 74, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES\n\n", "lm_q1_score": 0.7879312056025699, "lm_q2_score": 0.6334102498375401, "lm_q1q2_score": 0.499083701795518}} {"text": " // graph-tool -- a general graph modification and manipulation thingy\n//\n// Copyright (C) 2006-2018 Tiago de Paula Peixoto \n//\n// This program is free software; you can redistribute it and/or\n// modify it under the terms of the GNU General Public License\n// as published by the Free Software Foundation; either version 3\n// of the License, or (at your option) any later version.\n//\n// This program is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU General Public License for more details.\n//\n// You should have received a copy of the GNU General Public License\n// along with this program. 
If not, see .\n\n#include \n#include \n\n#include \"graph_tool.hh\"\n#include \"hash_map_wrap.hh\"\n#include \"int_part.hh\"\n#include \"util.hh\"\n\ndouble spence(double);\n\nnamespace graph_tool\n{\n\nusing namespace std;\n\nboost::multi_array __q_cache;\n\ndouble log_sum(double a, double b)\n{\n return std::max(a, b) + std::log1p(exp(-abs(a-b)));\n}\n\nvoid init_q_cache(size_t n_max)\n{\n size_t old_n = __q_cache.shape()[0];\n if (old_n >= n_max)\n return;\n\n __q_cache.resize(boost::extents[n_max + 1][n_max + 1]);\n std::fill(__q_cache.data(), __q_cache.data() + __q_cache.num_elements(),\n -std::numeric_limits::infinity());\n\n for (size_t n = 1; n <= n_max; ++n)\n {\n __q_cache[n][1] = 0;\n for (size_t k = 2; k <= n; ++k)\n {\n __q_cache[n][k] = log_sum(__q_cache[n][k], __q_cache[n][k - 1]);\n if (n > k)\n __q_cache[n][k] = log_sum(__q_cache[n][k], __q_cache[n - k][k]);\n }\n }\n}\n\ndouble q_rec(int n, int k)\n{\n if (n <= 0 || k < 1)\n return 0;\n if (k > n)\n k = n;\n if (k == 1)\n return 1;\n return q_rec(n, k - 1) + q_rec(n - k, k);\n}\n\ngt_hash_map, double> __q_memo;\n\ndouble q_rec_memo(int n, int k)\n{\n if (k > n || n <= 0 || k < 1)\n return 0;\n if (k > n)\n k = n;\n if (k == 1)\n return 1;\n auto key = make_pair(n, k);\n auto iter = __q_memo.find(key);\n if (iter != __q_memo.end())\n return iter->second;\n auto res = q_rec_memo(n, k - 1) + q_rec_memo(n - k, k);\n __q_memo[key] = res;\n return res;\n}\n\ndouble log_q_approx_big(size_t n, size_t k)\n{\n double C = M_PI * sqrt(2/3.);\n double S = C * sqrt(n) - log(4 * sqrt(3) * n);\n if (k < n)\n {\n double x = k / sqrt(n) - log(n) / C;\n S -= (2 / C) * exp(-C * x / 2);\n }\n return S;\n}\n\ndouble log_q_approx_small(size_t n, size_t k)\n{\n return lbinom_fast(n - 1, k - 1) - lgamma_fast(k + 1);\n}\n\ndouble get_v(double u, double epsilon=1e-8)\n{\n double v = u;\n double delta = 1;\n while (delta > epsilon)\n {\n // spence(exp(v)) = -spence(exp(-v)) - (v*v)/2\n double n_v = u * 
sqrt(spence(exp(-v)));\n delta = abs(n_v - v);\n v = n_v;\n }\n return v;\n}\n\ndouble log_q_approx(size_t n, size_t k)\n{\n if (k < pow(n, 1/4.))\n return log_q_approx_small(n, k);\n double u = k / sqrt(n);\n double v = get_v(u);\n double lf = log(v) - log1p(- exp(-v) * (1 + u * u/2)) / 2 - log(2) * 3 / 2.\n - log(u) - log(M_PI);\n double g = 2 * v / u - u * log1p(-exp(-v));\n return lf - log(n) + sqrt(n) * g;\n}\n\n\n} // namespace graph_tool\n", "meta": {"hexsha": "723ed116c3e430db9f35351bc742bc6b6d7c9657", "size": 3497, "ext": "cc", "lang": "C++", "max_stars_repo_path": "graph-tool-2.27/src/graph/inference/support/int_part.cc", "max_stars_repo_name": "Znigneering/CSCI-3154", "max_stars_repo_head_hexsha": "bc318efc73d2a80025b98f5b3e4f7e4819e952e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "graph-tool-2.27/src/graph/inference/support/int_part.cc", "max_issues_repo_name": "Znigneering/CSCI-3154", "max_issues_repo_head_hexsha": "bc318efc73d2a80025b98f5b3e4f7e4819e952e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graph-tool-2.27/src/graph/inference/support/int_part.cc", "max_forks_repo_name": "Znigneering/CSCI-3154", "max_forks_repo_head_hexsha": "bc318efc73d2a80025b98f5b3e4f7e4819e952e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5255474453, "max_line_length": 80, "alphanum_fraction": 0.5784958536, "num_tokens": 1097, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7461389930307512, "lm_q2_score": 0.6688802735722129, "lm_q1q2_score": 0.4990776537813043}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nbool in_range(double x, double min_x, double max_x) {\n return (x > min_x) && (x < max_x);\n}\n\ndouble clamp(double x, double min_x, double max_x) {\n if (min_x > max_x) {\n ROS_ERROR(\"[Monitoring] min_x[%lf] > max_x[%lf], swapping!\", min_x, max_x);\n std::swap(min_x, max_x);\n }\n return std::max(std::min(x, max_x), min_x);\n}\n\ndouble dot(const geometry_msgs::Vector3& u, const geometry_msgs::Vector3& v) {\n return u.x * v.x + u.y * v.y + u.z * v.z;\n}\n\ndouble length(const geometry_msgs::Vector3& u) {\n return sqrt(u.x * u.x + u.y * u.y + u.z * u.z);\n}\n\n// TODO: Use geometry_msgs/Point instead of Waypoint?\ngeometry_msgs::Vector3 vector_from_point_to_point(const gauss_msgs::Waypoint& A, const gauss_msgs::Waypoint& B) {\n geometry_msgs::Vector3 AB;\n AB.x = B.x - A.x;\n AB.y = B.y - A.y;\n AB.z = B.z - A.z;\n return AB;\n}\n\n// Signed Distance Fucntion from P to:\n// a sphere centered in C with radius r\ndouble sdSphere(const gauss_msgs::Waypoint& P, const gauss_msgs::Waypoint& C, double r) {\n return length(vector_from_point_to_point(C, P)) - r;\n}\n\n// Signed Distance Fucntion from P to:\n// a segment (A,B) with some radius r\ndouble sdSegment(const gauss_msgs::Waypoint& P, const gauss_msgs::Waypoint& A, const gauss_msgs::Waypoint& B, double r = 0) {\n geometry_msgs::Vector3 AP = vector_from_point_to_point(A, P);\n geometry_msgs::Vector3 AB = vector_from_point_to_point(A, B);\n double h = clamp(dot(AP, AB) / dot(AB, AB), 0.0, 1.0);\n geometry_msgs::Vector3 aux;\n aux.x = AP.x - AB.x * h; // TODO: Use eigen?\n aux.y = AP.y - AB.y * h;\n aux.z = AP.z - AB.z * h;\n return length(aux) - r;\n}\n\ngeometry_msgs::Point translateToPoint(const gauss_msgs::Waypoint& WP) {\n geometry_msgs::Point P;\n P.x = WP.x;\n P.y 
= WP.y;\n P.z = WP.z;\n return P;\n}\n\nstruct Segment {\n Segment() = default;\n Segment(gauss_msgs::Waypoint A, gauss_msgs::Waypoint B) {\n // if (A == B) { ROS_WARN(\"A == B == [%lf, %lf, %lf, %lf]\", A.x, A.y, A.z, A.stamp.toSec()); } // TODO: compare function\n point_A = A;\n point_B = B;\n t_A = A.stamp.toSec();\n t_B = B.stamp.toSec();\n if (t_A >= t_B) {\n // ROS_WARN(\"t_A[%lf] >= t_B[%lf]\", t_A, t_B);\n }\n }\n\n gauss_msgs::Waypoint point_at_time(double t) const {\n if (t < t_A) {\n // ROS_WARN(\"t[%lf] < t_A[%lf]\", t, t_A);\n return point_A;\n }\n if (t > t_B) {\n // ROS_WARN(\"t[%lf] > t_B[%lf]\", t, t_B);\n return point_B;\n }\n if (t_A == t_B) {\n // ROS_WARN(\"t_A == t_B == %lf\", t_A);\n return point_A;\n }\n if (std::isnan(t)) {\n // ROS_WARN(\"t is NaN\");\n return point_A;\n }\n\n double m = (t - t_A) / (t_B - t_A);\n gauss_msgs::Waypoint point;\n point.x = point_A.x + m * (point_B.x - point_A.x);\n point.y = point_A.y + m * (point_B.y - point_A.y);\n point.z = point_A.z + m * (point_B.z - point_A.z);\n point.stamp.fromSec(t);\n return point;\n }\n\n gauss_msgs::Waypoint point_at_param(double m) const {\n gauss_msgs::Waypoint point;\n point.x = point_A.x + m * (point_B.x - point_A.x);\n point.y = point_A.y + m * (point_B.y - point_A.y);\n point.z = point_A.z + m * (point_B.z - point_A.z);\n double t = t_A + m * (t_B - t_A);\n point.stamp.fromSec(t);\n return point;\n }\n\n visualization_msgs::Marker translateToMarker(int id, std_msgs::ColorRGBA color) {\n visualization_msgs::Marker marker;\n marker.header.stamp = ros::Time::now();\n marker.header.frame_id = \"map\"; // TODO: other?\n marker.ns = \"segments\";\n marker.id = id;\n marker.type = visualization_msgs::Marker::ARROW;\n marker.action = visualization_msgs::Marker::ADD;\n marker.pose.orientation.w = 1;\n marker.scale.x = 5.0; // shaft diameter\n marker.scale.y = 1.5; // head diameter\n marker.scale.z = 1.0; // head length (if not zero)\n marker.color = color;\n marker.lifetime = 
ros::Duration(1.0); // TODO: pair with frequency\n marker.points.push_back(translateToPoint(point_A));\n marker.points.push_back(translateToPoint(point_B));\n return marker;\n }\n\n friend std::ostream& operator<<(std::ostream& out, const Segment& s);\n gauss_msgs::Waypoint point_A;\n gauss_msgs::Waypoint point_B;\n double t_A = 0;\n double t_B = 0;\n};\n\nstd::ostream& operator<<(std::ostream& out, const Segment& s) {\n out << \"[(\" << s.point_A << \"); (\" << s.point_B << \")]\";\n return out;\n}\n\nstd::pair delta(const Segment& first, const Segment& second) {\n geometry_msgs::Vector3 delta_alpha;\n delta_alpha.x = second.point_A.x - first.point_A.x;\n delta_alpha.y = second.point_A.y - first.point_A.y;\n delta_alpha.z = second.point_A.z - first.point_A.z;\n geometry_msgs::Vector3 delta_beta;\n delta_beta.x = second.point_B.x - first.point_B.x;\n delta_beta.y = second.point_B.y - first.point_B.y;\n delta_beta.z = second.point_B.z - first.point_B.z;\n return std::make_pair(delta_alpha, delta_beta);\n}\n\ndouble sq_distance(const Segment& first, const Segment& second, double mu) {\n if (mu < 0) {\n // ROS_WARN(\"mu[%lf] < 0, clamping!\", mu);\n mu = 0;\n }\n\n if (mu > 1) {\n // ROS_WARN(\"mu[%lf] > 1, clamping!\", mu);\n mu = 1;\n }\n\n auto d = delta(first, second);\n double delta_x = d.first.x + mu * (d.second.x - d.first.x);\n double delta_y = d.first.y + mu * (d.second.y - d.first.y);\n double delta_z = d.first.z + mu * (d.second.z - d.first.z);\n return pow(delta_x, 2) + pow(delta_y, 2) + pow(delta_z, 2);\n}\n\nstd::pair quadratic_roots(double a, double b, double c) {\n if ((a == 0) && (b == 0) && (c == 0)) {\n // ROS_WARN(\"a = b = c = 0, any number is a solution!\");\n return std::make_pair(std::nan(\"\"), std::nan(\"\"));\n }\n if ((a == 0) && (b == 0) && (c != 0)) {\n // ROS_WARN(\"a = b = 0, there is no solution!\");\n return std::make_pair(std::nan(\"\"), std::nan(\"\"));\n }\n if ((a == 0) && (b != 0)) {\n // ROS_WARN(\"a = 0, non quadratic!\");\n 
return std::make_pair(-c / b, -c / b);\n }\n\n float d = b * b - 4 * a * c;\n if (d < 0) {\n // ROS_WARN(\"d = [%lf], complex solutions!\", d);\n return std::make_pair(std::nan(\"\"), std::nan(\"\"));\n }\n double e = sqrt(d);\n return std::make_pair((-b - e) / (2 * a), (-b + e) / (2 * a));\n}\n\nstd::vector getFirstSetOfContiguousSegments(const std::vector& input) {\n std::vector output;\n if (input.size() < 1) {\n ROS_ERROR(\"[Monitoring] input.size() < 1\");\n return output;\n }\n\n output.push_back(input[0]);\n float t_gap_threshold = 1.0; // [s] TODO: as a parameter?\n for (int i = 1; i < input.size(); i++) {\n double t_gap = fabs(input[i].t_A - input[i - 1].t_B);\n if (t_gap > t_gap_threshold) {\n // i-th Segment is not contiguous\n break;\n }\n output.push_back(input[i]);\n }\n\n return output;\n}\n\ngeometry_msgs::Vector3 getUnitOutwardVector(const gauss_msgs::Circle& circle, const gauss_msgs::Waypoint wp) {\n geometry_msgs::Vector3 out;\n out.x = wp.x - circle.x_center;\n out.y = wp.y - circle.y_center;\n auto len = sqrt(pow(out.x, 2) + pow(out.y, 2));\n out.x /= len;\n out.y /= len;\n return out;\n}\n\nstruct LossConflictiveSegments {\n LossConflictiveSegments(const Segment& first, const Segment& second) : first(first), second(second) {}\n\n friend std::ostream& operator<<(std::ostream& out, const LossConflictiveSegments& r);\n Segment first;\n Segment second;\n double t_min = std::nan(\"\");\n double s_min = std::nan(\"\");\n double t_crossing_0 = std::nan(\"\");\n double t_crossing_1 = std::nan(\"\");\n bool threshold_is_violated = false;\n};\n\nstd::ostream& operator<<(std::ostream& out, const LossConflictiveSegments& r) {\n out << \"first = \" << r.first << '\\n';\n out << \"second = \" << r.second << '\\n';\n out << \"t_min[s] = \" << r.t_min << '\\n';\n out << \"s_min[m2] = \" << r.s_min << '\\n';\n out << \"t_crossing_0[s] = \" << r.t_crossing_0 << '\\n';\n out << \"t_crossing_1[s] = \" << r.t_crossing_1 << '\\n';\n out << \"threshold_is_violated = 
\" << r.threshold_is_violated << '\\n';\n return out;\n}\n\nLossConflictiveSegments checkUnifiedSegmentsLoss(Segment first, Segment second, double s_threshold) {\n // print('checkUnifiedSegmentsLoss:')\n // print(first.point_A)\n // print(first.point_B)\n // print('___________')\n // print(second.point_A)\n // print(second.point_B)\n auto d = delta(first, second);\n // print(d)\n double c_x = pow(d.first.x, 2);\n double c_y = pow(d.first.y, 2);\n double c_z = pow(d.first.z, 2);\n double b_x = 2 * (d.first.x * d.second.x - c_x);\n double b_y = 2 * (d.first.y * d.second.y - c_y);\n double b_z = 2 * (d.first.z * d.second.z - c_z);\n double a_x = pow(d.second.x - d.first.x, 2);\n double a_y = pow(d.second.y - d.first.y, 2);\n double a_z = pow(d.second.z - d.first.z, 2);\n double a = a_x + a_y + a_z;\n double b = b_x + b_y + b_z;\n double c = c_x + c_y + c_z;\n\n double mu_min, t_min, s_min;\n if (a == 0) {\n // ROS_WARN(\"a = 0\");\n if (b >= 0) {\n mu_min = 0;\n t_min = first.t_A;\n s_min = c;\n } else {\n mu_min = 1;\n t_min = first.t_B;\n s_min = b + c;\n }\n\n } else { // a != 0\n double mu_star = -0.5 * b / a;\n double t_star = first.t_A + mu_star * (first.t_B - first.t_A);\n // print(mu_star)\n // print(t_star)\n // print(sq_distance(first, second, mu_star))\n mu_min = clamp(mu_star, 0, 1);\n t_min = first.t_A + mu_min * (first.t_B - first.t_A);\n s_min = sq_distance(first, second, mu_min);\n // print(mu_min)\n // print(t_min)\n // print(s_min)\n }\n auto result = LossConflictiveSegments(first, second);\n result.t_min = t_min;\n result.s_min = s_min;\n\n if (s_min > s_threshold) {\n // ROS_INFO(\"s_min[%lf] > s_threshold[%lf]\", s_min, s_threshold);\n result.threshold_is_violated = false;\n return result;\n }\n\n auto mu_bar = quadratic_roots(a, b, c - s_threshold);\n double t_bar_0 = first.t_A + mu_bar.first * (first.t_B - first.t_A);\n double t_bar_1 = first.t_A + mu_bar.second * (first.t_B - first.t_A);\n // print(mu_bar)\n // print(t_bar_0, t_bar_1)\n double 
mu_crossing_0 = clamp(mu_bar.first, 0, 1);\n double mu_crossing_1 = clamp(mu_bar.second, 0, 1);\n double t_crossing_0 = first.t_A + mu_crossing_0 * (first.t_B - first.t_A);\n double t_crossing_1 = first.t_A + mu_crossing_1 * (first.t_B - first.t_A);\n // print(mu_crossing_0, mu_crossing_1)\n // print(t_crossing_0, t_crossing_1)\n auto first_in_conflict = Segment(first.point_at_time(t_crossing_0), first.point_at_time(t_crossing_1));\n auto second_in_conflict = Segment(second.point_at_time(t_crossing_0), second.point_at_time(t_crossing_1));\n\n result.first = first_in_conflict;\n result.second = second_in_conflict;\n result.t_crossing_0 = t_crossing_0;\n result.t_crossing_1 = t_crossing_1;\n result.threshold_is_violated = true;\n return result;\n}\n\nLossConflictiveSegments checkSegmentsLoss(const std::pair& segments, double s_threshold) {\n // print('checkSegmentsLoss:')\n // print(first.point_A)\n // print(first.point_B)\n // print('___________')\n // print(second.point_A)\n // print(second.point_B)\n double t_A1 = segments.first.t_A;\n double t_B1 = segments.first.t_B;\n double t_A2 = segments.second.t_A;\n double t_B2 = segments.second.t_B;\n double t_alpha = std::max(t_A1, t_A2);\n double t_beta = std::min(t_B1, t_B2);\n if (t_alpha > t_beta) {\n // ROS_INFO(\"t_alpha[%lf] > t_beta[%lf]\", t_alpha, t_beta);\n return LossConflictiveSegments(segments.first, segments.second);\n }\n\n auto P_alpha1 = segments.first.point_at_time(t_alpha);\n auto P_beta1 = segments.first.point_at_time(t_beta);\n auto P_alpha2 = segments.second.point_at_time(t_alpha);\n auto P_beta2 = segments.second.point_at_time(t_beta);\n return checkUnifiedSegmentsLoss(Segment(P_alpha1, P_beta1), Segment(P_alpha2, P_beta2), s_threshold);\n}\n\nstd::vector checkTrajectoriesLoss(const std::pair& trajectories, double s_threshold) {\n std::vector segment_loss_results;\n if (trajectories.first.waypoints.size() < 2) {\n // TODO: Warn and push the same point twice?\n ROS_ERROR(\"[Monitoring] Trajectory 
must contain at least 2 points, [%ld] found in first argument\", trajectories.first.waypoints.size());\n return segment_loss_results;\n }\n if (trajectories.second.waypoints.size() < 2) {\n // TODO: Warn and push the same point twice?\n ROS_ERROR(\"[Monitoring] Trajectory must contain at least 2 points, [%ld] found in second argument\", trajectories.second.waypoints.size());\n return segment_loss_results;\n }\n\n for (int i = 0; i < trajectories.first.waypoints.size() - 1; i++) {\n std::pair segments;\n // printf(\"First segment, i = %d\\n\", i);\n segments.first = Segment(trajectories.first.waypoints[i], trajectories.first.waypoints[i + 1]);\n // std::cout << segments.first.point_A << \"_____________\\n\" << segments.first.point_B << '\\n';\n for (int j = 0; j < trajectories.second.waypoints.size() - 1; j++) {\n // printf(\"Second segment, j = %d\\n\", j);\n segments.second = Segment(trajectories.second.waypoints[j], trajectories.second.waypoints[j + 1]);\n // std::cout << segments.second.point_A << \"_____________\\n\" << segments.second.point_B << '\\n';\n auto loss_check = checkSegmentsLoss(segments, s_threshold);\n if (loss_check.threshold_is_violated) {\n // ROS_ERROR(\"[Monitoring] Loss of separation! 
[i = %d, j = %d]\", i, j);\n // std::cout << loss_check << '\\n';\n segment_loss_results.push_back(loss_check);\n }\n }\n }\n return segment_loss_results;\n}\n\nstruct LossExtreme {\n gauss_msgs::Waypoint in_point;\n gauss_msgs::Waypoint out_point;\n};\n\nvisualization_msgs::Marker translateToMarker(const LossExtreme& extremes, int id = 0) {\n visualization_msgs::Marker marker;\n std_msgs::ColorRGBA red, green; // TODO: constants\n red.r = 1.0;\n red.a = 1.0;\n green.g = 1.0;\n green.a = 1.0;\n\n marker.header.stamp = ros::Time::now();\n marker.header.frame_id = \"map\"; // TODO: other?\n marker.ns = \"extremes\"; // TODO: other?\n marker.id = id;\n marker.type = visualization_msgs::Marker::SPHERE_LIST;\n marker.action = visualization_msgs::Marker::ADD;\n marker.pose.orientation.w = 1;\n marker.scale.x = 5.0;\n marker.scale.y = 5.0;\n marker.scale.z = 5.0;\n marker.lifetime = ros::Duration(1.0); // TODO: pair with frequency\n marker.points.push_back(translateToPoint(extremes.in_point));\n marker.colors.push_back(red);\n marker.points.push_back(translateToPoint(extremes.out_point));\n marker.colors.push_back(green);\n return marker;\n}\n\ngauss_msgs::ConflictiveOperation fillConflictiveOperation(const int& _trajectory_index, const std::map& _index_to_operation_map) {\n gauss_msgs::ConflictiveOperation out_msg;\n out_msg.actual_wp = _index_to_operation_map.at(_trajectory_index).track.waypoints.back();\n out_msg.current_wp = _index_to_operation_map.at(_trajectory_index).current_wp;\n out_msg.estimated_trajectory = _index_to_operation_map.at(_trajectory_index).estimated_trajectory;\n out_msg.flight_plan = _index_to_operation_map.at(_trajectory_index).flight_plan;\n out_msg.flight_plan_updated = _index_to_operation_map.at(_trajectory_index).flight_plan_updated;\n out_msg.landing_spots = _index_to_operation_map.at(_trajectory_index).landing_spots;\n out_msg.operational_volume = _index_to_operation_map.at(_trajectory_index).operational_volume;\n out_msg.uav_id = 
_index_to_operation_map.at(_trajectory_index).uav_id;\n\n return out_msg;\n}\n\nstruct LossResult {\n LossResult(const int i, const int j) : first_trajectory_index(i), second_trajectory_index(j) {}\n friend std::ostream& operator<<(std::ostream& out, const LossResult& r);\n int first_trajectory_index;\n int second_trajectory_index;\n std::vector loss_conflictive_segments;\n\n bool isEqual(const LossResult& other) {\n const float check_time_margin = 10.0; // [s]\n return (first_trajectory_index == other.first_trajectory_index && second_trajectory_index == other.second_trajectory_index) &&\n (std::abs(loss_conflictive_segments.front().t_crossing_0 - other.loss_conflictive_segments.front().t_crossing_0) <= check_time_margin ||\n std::abs(loss_conflictive_segments.front().t_crossing_1 - other.loss_conflictive_segments.front().t_crossing_1) <= check_time_margin ||\n std::abs(loss_conflictive_segments.back().t_crossing_0 - other.loss_conflictive_segments.back().t_crossing_0) <= check_time_margin ||\n std::abs(loss_conflictive_segments.back().t_crossing_1 - other.loss_conflictive_segments.back().t_crossing_1) <= check_time_margin);\n }\n gauss_msgs::NewThreat convertToThreat(const std::map& _index_to_operation_map, double& count_id) {\n gauss_msgs::NewThreat out_threat;\n out_threat.threat_id = count_id++;\n out_threat.threat_type = out_threat.LOSS_OF_SEPARATION;\n out_threat.uav_ids.push_back(_index_to_operation_map.at(first_trajectory_index).uav_id);\n out_threat.uav_ids.push_back(_index_to_operation_map.at(second_trajectory_index).uav_id);\n out_threat.priority_ops.push_back(_index_to_operation_map.at(first_trajectory_index).priority);\n out_threat.priority_ops.push_back(_index_to_operation_map.at(second_trajectory_index).priority);\n out_threat.conflictive_operations.push_back(fillConflictiveOperation(first_trajectory_index, _index_to_operation_map));\n out_threat.conflictive_operations.push_back(fillConflictiveOperation(second_trajectory_index, 
_index_to_operation_map));\n for (auto j : loss_conflictive_segments) {\n out_threat.loss_conflictive_segments.segment_first.push_back(j.first.point_A);\n out_threat.loss_conflictive_segments.segment_first.push_back(j.first.point_B);\n out_threat.loss_conflictive_segments.segment_second.push_back(j.second.point_A);\n out_threat.loss_conflictive_segments.segment_second.push_back(j.second.point_B);\n out_threat.loss_conflictive_segments.t_min = j.t_min;\n out_threat.loss_conflictive_segments.s_min = j.s_min;\n out_threat.loss_conflictive_segments.t_crossing_0 = j.t_crossing_0;\n out_threat.loss_conflictive_segments.t_crossing_1 = j.t_crossing_1;\n out_threat.loss_conflictive_segments.point_at_t_min_segment_first = j.first.point_at_time(j.t_min);\n out_threat.loss_conflictive_segments.point_at_t_min_segment_second = j.second.point_at_time(j.t_min);\n }\n\n return out_threat;\n }\n};\n\nstruct GeoConflictiveTrajectory {\n GeoConflictiveTrajectory(int i): trajectory_index(i) {}\n int trajectory_index;\n std::vector geofence_conflictive_segments;\n gauss_msgs::Waypoint closest_exit_wp; // Use the mandatory field as intrusion flag\n\n bool isEqual(const GeoConflictiveTrajectory& other) {\n const double check_time_margin = 10.0;\n return (trajectory_index == other.trajectory_index &&\n closest_exit_wp.mandatory == other.closest_exit_wp.mandatory &&\n (geofence_conflictive_segments.front().t_A - other.geofence_conflictive_segments.front().t_A <= check_time_margin ||\n geofence_conflictive_segments.back().t_B - other.geofence_conflictive_segments.back().t_B <= check_time_margin));\n }\n};\n\nstruct GeofenceResult {\n GeofenceResult(int i): geofence_id(i) {}\n int geofence_id;\n std::vector geo_conflictive_trajectories;\n \n bool isEqual(const GeofenceResult& other) {\n const float check_time_margin = 10.0; // [s]\n if (geofence_id != other.geofence_id) {\n return false;\n } else {\n for (auto geo_trajectory : geo_conflictive_trajectories) {\n std::vector::const_iterator 
traj_it = std::find_if(other.geo_conflictive_trajectories.begin(), other.geo_conflictive_trajectories.end(),\n [geo_trajectory](GeoConflictiveTrajectory _traj) { return _traj.isEqual(geo_trajectory);});\n return (traj_it != other.geo_conflictive_trajectories.end());\n }\n }\n }\n std::vector convertToThreat(const std::map& _index_to_operation_map, const std::map& _index_to_geofence_map, double& count_id) {\n std::vector out_threats;\n for (auto geo_conflictive_trajectory : geo_conflictive_trajectories) {\n gauss_msgs::NewThreat aux_threat;\n aux_threat.threat_id = count_id++;\n aux_threat.geofence_ids.push_back(geofence_id);\n aux_threat.uav_ids.push_back(_index_to_operation_map.at(geo_conflictive_trajectory.trajectory_index).uav_id);\n aux_threat.conflictive_geofences.push_back(_index_to_geofence_map.at(geofence_id));\n aux_threat.conflictive_operations.push_back(fillConflictiveOperation(geo_conflictive_trajectory.trajectory_index, _index_to_operation_map));\n std::vector aux_vec = getFirstSetOfContiguousSegments(geo_conflictive_trajectory.geofence_conflictive_segments);\n for (auto segment : aux_vec) {\n aux_threat.geofence_conflictive_segments.first_contiguous_segment.push_back(segment.point_A);\n aux_threat.geofence_conflictive_segments.first_contiguous_segment.push_back(segment.point_B);\n }\n for (auto segment : geo_conflictive_trajectory.geofence_conflictive_segments) {\n aux_threat.geofence_conflictive_segments.all_segments.push_back(segment.point_A);\n aux_threat.geofence_conflictive_segments.all_segments.push_back(segment.point_B);\n }\n\n auto geo_circle = _index_to_geofence_map.at(geofence_id).circle;\n auto crossing_0 = aux_threat.geofence_conflictive_segments.first_contiguous_segment.front();\n auto crossing_1 = aux_threat.geofence_conflictive_segments.first_contiguous_segment.back();\n aux_threat.geofence_conflictive_segments.crossing_0_out_vector = getUnitOutwardVector(geo_circle, crossing_0);\n 
aux_threat.geofence_conflictive_segments.crossing_1_out_vector = getUnitOutwardVector(geo_circle, crossing_1);\n if (geo_conflictive_trajectory.closest_exit_wp.mandatory) {\n aux_threat.threat_type = aux_threat.GEOFENCE_INTRUSION;\n auto closest_exit = geo_conflictive_trajectory.closest_exit_wp;\n aux_threat.geofence_conflictive_segments.closest_exit_wp = closest_exit;\n aux_threat.geofence_conflictive_segments.closest_exit_out_vector = getUnitOutwardVector(geo_circle, closest_exit);\n } else {\n aux_threat.threat_type = aux_threat.GEOFENCE_CONFLICT;\n }\n out_threats.push_back(aux_threat);\n }\n\n return out_threats;\n }\n};\n\nstd::vector getContiguousResults(const LossResult& input) {\n std::vector output;\n if (input.loss_conflictive_segments.size() < 1) {\n ROS_ERROR(\"[Monitoring] input.loss_conflictive_segments.size() < 1\");\n return output;\n }\n\n float t_gap_threshold = 1.0; // [s] TODO: as a parameter?\n auto current_loss_result = input;\n current_loss_result.loss_conflictive_segments.clear();\n current_loss_result.loss_conflictive_segments.push_back(input.loss_conflictive_segments[0]);\n for (int i = 1; i < input.loss_conflictive_segments.size(); i++) {\n double t_gap_first = fabs(input.loss_conflictive_segments[i].first.t_A - input.loss_conflictive_segments[i - 1].first.t_B);\n double t_gap_second = fabs(input.loss_conflictive_segments[i].second.t_A - input.loss_conflictive_segments[i - 1].second.t_B);\n if ((t_gap_first > t_gap_threshold) || (t_gap_second > t_gap_threshold)) {\n // i-th element is not contiguous\n output.push_back(current_loss_result);\n current_loss_result.loss_conflictive_segments.clear();\n }\n current_loss_result.loss_conflictive_segments.push_back(input.loss_conflictive_segments[i]);\n }\n output.push_back(current_loss_result);\n\n return output;\n}\n\nstd::pair calculateExtremes(const LossResult& result) {\n std::pair extremes;\n if (result.loss_conflictive_segments.size() < 1) {\n ROS_ERROR(\"[Monitoring] 
result.loss_conflictive_segments.size() < 1\");\n return extremes;\n }\n\n // In points are for sure A's from segment 0\n extremes.first.in_point = result.loss_conflictive_segments[0].first.point_A;\n extremes.second.in_point = result.loss_conflictive_segments[0].second.point_A;\n\n // Initialize out points as B's from segment 0...\n extremes.first.out_point = result.loss_conflictive_segments[0].first.point_B;\n extremes.second.out_point = result.loss_conflictive_segments[0].second.point_B;\n // ...but update if more contiguous segments are available\n float t_gap_threshold = 1.0; // [s]\n // Check for first\n for (int i = 1; i < result.loss_conflictive_segments.size(); i++) {\n double t_gap = fabs(result.loss_conflictive_segments[i].first.t_A - result.loss_conflictive_segments[i - 1].first.t_B);\n if (t_gap > t_gap_threshold) {\n break;\n }\n extremes.first.out_point = result.loss_conflictive_segments[i].first.point_B;\n }\n // Check for second\n for (int i = 1; i < result.loss_conflictive_segments.size(); i++) {\n double t_gap = fabs(result.loss_conflictive_segments[i].second.t_A - result.loss_conflictive_segments[i - 1].second.t_B);\n if (t_gap > t_gap_threshold) {\n break;\n }\n extremes.second.out_point = result.loss_conflictive_segments[i].second.point_B;\n }\n\n return extremes;\n}\n\nvisualization_msgs::Marker translateToMarker(const LossResult& result) {\n visualization_msgs::Marker marker;\n marker.header.stamp = ros::Time::now();\n marker.header.frame_id = \"map\"; // TODO: other?\n marker.ns = \"loss_\" + std::to_string(result.first_trajectory_index) + \"_\" + std::to_string(result.second_trajectory_index);\n marker.type = visualization_msgs::Marker::LINE_LIST;\n marker.action = visualization_msgs::Marker::ADD;\n marker.pose.orientation.w = 1;\n marker.scale.x = 1.0;\n marker.color.r = 1.0; // TODO: color?\n marker.color.a = 1.0;\n marker.lifetime = ros::Duration(1.0); // TODO: pair with frequency\n for (auto segments_loss : 
result.loss_conflictive_segments) {\n marker.points.push_back(translateToPoint(segments_loss.first.point_A));\n marker.points.push_back(translateToPoint(segments_loss.first.point_B));\n marker.points.push_back(translateToPoint(segments_loss.second.point_A));\n marker.points.push_back(translateToPoint(segments_loss.second.point_B));\n }\n /*\n double t_min = std::nan(\"\");\n double s_min = std::nan(\"\");\n double t_crossing_0 = std::nan(\"\");\n double t_crossing_1 = std::nan(\"\");\n bool threshold_is_violated = false;\n*/\n return marker;\n}\n\nstd::ostream& operator<<(std::ostream& out, const LossResult& r) {\n out << \"first_trajectory_index = \" << r.first_trajectory_index << '\\n';\n out << \"second_trajectory_index = \" << r.second_trajectory_index << '\\n';\n out << \"loss_conflictive_segments = [\" << r.second_trajectory_index << '\\n';\n for (int i = 0; i < r.loss_conflictive_segments.size(); i++) {\n out << r.loss_conflictive_segments[i] << '\\n';\n }\n out << \"]\\n\";\n return out;\n}\n\nbool happensBefore(const LossResult& a, const LossResult& b) {\n if (a.loss_conflictive_segments.size() < 1) {\n ROS_ERROR(\"[Monitoring] a.loss_conflictive_segments.size() < 1\");\n return false;\n }\n if (b.loss_conflictive_segments.size() < 1) {\n ROS_ERROR(\"[Monitoring] b.loss_conflictive_segments.size() < 1\");\n return true;\n }\n\n return a.loss_conflictive_segments[0].t_crossing_0 < b.loss_conflictive_segments[0].t_crossing_0;\n}\n\nvoid cleanStoredList(std::vector& _stored_loss_result_list, std::vector& _stored_geofence_result_list, const std::vector& _actual_loss_result_list, const std::vector& _actual_geofence_result_list, const double& _check_time_margin) {\n if (_actual_loss_result_list.size() == 0) {\n _stored_loss_result_list.clear();\n } else {\n for (auto _stored_result = _stored_loss_result_list.begin(); _stored_result != _stored_loss_result_list.end();) {\n std::vector::const_iterator actual_it = std::find_if(_actual_loss_result_list.begin(), 
_actual_loss_result_list.end(),\n [_stored_result](LossResult _actual_result) { return _actual_result.isEqual(*_stored_result); });\n bool do_erase = (actual_it == _actual_loss_result_list.end());\n if (do_erase) {\n _stored_result = _stored_loss_result_list.erase(_stored_result);\n } else {\n _stored_result++;\n }\n }\n }\n}\n\ngauss_msgs::NewThreats manageResultList(std::vector& _loss_result_list, std::vector& _geofence_result_list, const std::map& _index_to_operation_map, const std::map& _index_to_geofence_map) {\n double check_time_margin = 10.0;\n static double threat_count_id = 0;\n static double stored_loss_id_count = 0;\n static double stored_geofence_id_count = 0;\n gauss_msgs::NewThreats out_threats;\n static std::vector stored_loss_result_list;\n static std::vector stored_geofence_result_list;\n\n if (stored_loss_result_list.size() > 0 || stored_geofence_result_list.size() > 0) cleanStoredList(stored_loss_result_list, stored_geofence_result_list, _loss_result_list, _geofence_result_list, check_time_margin);\n if (_loss_result_list.size() > 0) {\n if (stored_loss_result_list.size() == 0) {\n stored_loss_result_list.push_back(_loss_result_list.front());\n out_threats.request.threats.push_back(_loss_result_list[0].convertToThreat(_index_to_operation_map, threat_count_id));\n } else {\n for (auto _loss_result : _loss_result_list) {\n bool save_loss_result = false;\n // Using lambda, check if both trajectory index of _loss_result are in stored_loss_result_list\n std::vector::iterator stored_it = std::find_if(stored_loss_result_list.begin(), stored_loss_result_list.end(),\n [_loss_result](LossResult stored_loss_result) { return stored_loss_result.isEqual(_loss_result); });\n save_loss_result = (stored_it == stored_loss_result_list.end());\n if (save_loss_result) {\n stored_loss_result_list.push_back(_loss_result);\n out_threats.request.threats.push_back(_loss_result.convertToThreat(_index_to_operation_map, threat_count_id));\n }\n }\n }\n }\n if 
(_geofence_result_list.size() > 0) {\n if (stored_geofence_result_list.size() == 0) {\n stored_geofence_result_list.push_back(_geofence_result_list.front());\n std::vector aux_vec = _geofence_result_list[0].convertToThreat(_index_to_operation_map, _index_to_geofence_map, threat_count_id);\n out_threats.request.threats.insert(out_threats.request.threats.end(), aux_vec.begin(), aux_vec.end());\n } else {\n for (auto _geofence_result : _geofence_result_list) {\n bool save_geofence_result = false;\n // Using lambda, check if both trajectory index of _geofence_result are in stored_geofence_result_list\n std::vector::iterator stored_it = std::find_if(stored_geofence_result_list.begin(), stored_geofence_result_list.end(),\n [_geofence_result](GeofenceResult stored_loss_result) { return stored_loss_result.isEqual(_geofence_result); });\n save_geofence_result = (stored_it == stored_geofence_result_list.end());\n if (save_geofence_result) {\n stored_geofence_result_list.push_back(_geofence_result);\n std::vector aux_vec = _geofence_result.convertToThreat(_index_to_operation_map, _index_to_geofence_map, threat_count_id);\n out_threats.request.threats.insert(out_threats.request.threats.end(), aux_vec.begin(), aux_vec.end());\n }\n }\n }\n }\n\n return out_threats;\n}\n\nstd::pair checkGeofence2D(const Segment& segment, const gauss_msgs::Circle& circle) {\n\n auto translated_segment = segment;\n translated_segment.point_A.x -= circle.x_center;\n translated_segment.point_A.y -= circle.y_center;\n translated_segment.point_B.x -= circle.x_center;\n translated_segment.point_B.y -= circle.y_center;\n\n float sq_distance_A = pow(translated_segment.point_A.x, 2) + pow(translated_segment.point_A.y, 2);\n float sq_distance_B = pow(translated_segment.point_B.x, 2) + pow(translated_segment.point_B.y, 2);\n float sq_radius = pow(circle.radius, 2);\n\n bool point_A_is_in = (sq_distance_A < sq_radius);\n bool point_B_is_in = (sq_distance_B < sq_radius);\n\n // Geofence2DResult result;\n if 
(point_A_is_in && point_B_is_in) {\n // A and B inside the circle\n // ROS_INFO(\"A and B inside the circle\");\n return std::make_pair(segment.t_A, segment.t_B);\n }\n\n float a_x = pow(translated_segment.point_B.x - translated_segment.point_A.x, 2);\n float b_x = 2.0 * (translated_segment.point_B.x - translated_segment.point_A.x) * translated_segment.point_A.x;\n float c_x = pow(translated_segment.point_A.x, 2);\n float a_y = pow(translated_segment.point_B.y - translated_segment.point_A.y, 2);\n float b_y = 2.0 * (translated_segment.point_B.y - translated_segment.point_A.y) * translated_segment.point_A.y;\n float c_y = pow(translated_segment.point_A.y, 2);\n\n auto m_crossing = quadratic_roots(a_x + a_y, b_x + b_y, c_x + c_y - sq_radius);\n // ROS_INFO(\"Roots: m = [%lf, %lf]\", m_crossing.first, m_crossing.second);\n\n if (std::isnan(m_crossing.first)) { // m_crossing.second should also be nan\n // There is no intersection at all\n // ROS_INFO(\"There is no intersection at all\");\n return std::make_pair(std::nan(\"\"), std::nan(\"\"));\n }\n\n if ((m_crossing.first > 1) || (m_crossing.second < 0)) {\n // ROS_INFO(\"Intersection is out of the segment\");\n return std::make_pair(std::nan(\"\"), std::nan(\"\"));\n }\n\n auto t_crossing_0 = translated_segment.t_A + clamp(m_crossing.first, 0, 1) * (translated_segment.t_B - translated_segment.t_A);\n auto t_crossing_1 = translated_segment.t_A + clamp(m_crossing.second, 0, 1) * (translated_segment.t_B - translated_segment.t_A);\n return std::make_pair(t_crossing_0, t_crossing_1);\n}\n\nbool checkOverlappingInTime(std::pair time_interval_a, std::pair time_interval_b) {\n return (time_interval_a.first <= time_interval_b.second) && (time_interval_a.second >= time_interval_b.first);\n}\n\ngeometry_msgs::Point calculateClosestExit(const geometry_msgs::Point& current, const gauss_msgs::Circle& circle) {\n geometry_msgs::Point out;\n float delta_x = current.x - circle.x_center;\n float delta_y = current.y - 
circle.y_center;\n auto distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2));\n if (distance < 1e-3) {\n // At the center of the geofence? Go East!\n out.x = circle.x_center + circle.radius;\n out.y = circle.y_center;\n // out.x = std::nan(\"\"); // TODO: Better NaN and handle later?\n // out.y = std::nan(\"\");\n return out;\n }\n out.x = circle.x_center + (delta_x / distance) * circle.radius;\n out.y = circle.y_center + (delta_y / distance) * circle.radius;\n return out;\n}\n\nstd::vector checkGeofenceConflict(const std::vector& trajectories, const std::vector& volumes, const gauss_msgs::Geofence& geofence) {\n std::vector result;\n\n if (!geofence.cylinder_shape) {\n // TODO: implement also for polygons\n ROS_ERROR(\"[Monitoring] Polygon geofences not implemented yet\");\n return result;\n // float64 min_altitude\t# meters\n // float64 max_altitude\t# meters\n // Polygon polygon\n // float64[] x\n // float64[] y\n }\n\n if (trajectories.size() != volumes.size()) {\n ROS_ERROR(\"[Monitoring] Sizes do not match: trajectories.size() = %ld, volumes.size() = %ld\", trajectories.size(), volumes.size());\n return result;\n }\n\n for (int i = 0; i < trajectories.size(); i++) {\n // ROS_INFO(\"Checking trajectory [%d]\", i);\n GeoConflictiveTrajectory current_result(i);\n\n if (trajectories[i].waypoints.size() < 2) {\n // TODO: Warn and push the same point twice?\n ROS_ERROR(\"[Monitoring]: trajectory must contain at least 2 points, [%ld] found in second argument\", trajectories[i].waypoints.size());\n continue;\n }\n auto operational_volume = volumes[i];\n auto rectified_geofence = geofence;\n rectified_geofence.min_altitude -= operational_volume;\n rectified_geofence.max_altitude += operational_volume;\n rectified_geofence.circle.radius += operational_volume;\n\n for (int j = 0; j < trajectories[i].waypoints.size() - 1; j++) {\n // ROS_INFO(\"Checking segment [%d, %d]\", j, j + 1);\n auto segment = Segment(trajectories[i].waypoints[j], trajectories[i].waypoints[j + 
1]);\n\n // TODO: combine the following two ifs into a single one?\n if ((segment.point_A.z < rectified_geofence.min_altitude) && (segment.point_B.z < rectified_geofence.min_altitude)) {\n // The whole segment lies below the rectified_geofence\n // ROS_INFO(\"The whole segment lies below the geofence\");\n continue;\n }\n if ((segment.point_A.z > rectified_geofence.max_altitude) && (segment.point_B.z > rectified_geofence.max_altitude)) {\n // The whole segment lies above the rectified_geofence\n // ROS_INFO(\"The whole segment lies above the geofence\");\n continue;\n }\n\n // TODO: enlarge the circle with operational volume (* some security_gain)\n float delta_z = segment.point_B.z - segment.point_A.z;\n // if (fabs(delta_z) < 1e-3) { // TODO: Early return?\n // // We can consider the whole segment lies inside the geofence z interval\n // // TODO: Consider the special case of j == 0 for geofence intrusion!\n // ROS_INFO(\"We can consider the whole segment lies inside the geofence z interval\");\n // }\n\n // Get the segment that does lie between min_alt, max_alt\n if (segment.point_A.z < rectified_geofence.min_altitude) {\n // Point A lies below the geofence z interval\n // ROS_INFO(\"Point A lies below the geofence z interval\");\n float m_min = (rectified_geofence.min_altitude - segment.point_A.z) / delta_z;\n segment.point_A = segment.point_at_param(m_min);\n // TODO: Consider the special case of j == 0 for geofence intrusion!\n } else if (segment.point_A.z > rectified_geofence.max_altitude) {\n // Point A lies above the geofence z interval\n // ROS_INFO(\"Point A lies above the geofence z interval\");\n float m_max = (rectified_geofence.max_altitude - segment.point_A.z) / delta_z;\n segment.point_A = segment.point_at_param(m_max);\n // TODO: Consider the special case of j == 0 for geofence intrusion!\n }\n\n // Same for point B // TODO: repeated code!\n if (segment.point_B.z < rectified_geofence.min_altitude) {\n // Point B lies below the geofence z interval\n 
// ROS_INFO(\"Point B lies below the geofence z interval\");\n float m_min = (rectified_geofence.min_altitude - segment.point_B.z) / delta_z;\n segment.point_B = segment.point_at_param(m_min);\n } else if (segment.point_B.z > rectified_geofence.max_altitude) {\n // Point B lies above the geofence z interval\n // ROS_INFO(\"Point B lies above the geofence z interval\");\n float m_max = (rectified_geofence.max_altitude - segment.point_B.z) / delta_z;\n segment.point_B = segment.point_at_param(m_max);\n }\n\n auto conflict_times = checkGeofence2D(segment, rectified_geofence.circle);\n if (std::isnan(conflict_times.first)) { // conflict_times.second should be also nan\n // ROS_INFO(\"No conflicts\");\n // return result; // TODO: Only for debug!\n continue;\n } // TODO: Rename to GeofenceConflict?\n\n auto current_time = ros::Time::now().toSec();\n if (current_time > conflict_times.second) { // Should be also > conflict_times.second\n // ROS_INFO(\"Past conflicts do not count :)\");\n // return result; // TODO: Only for debug!\n continue;\n }\n\n if (checkOverlappingInTime(conflict_times, std::make_pair(rectified_geofence.start_time.toSec(), rectified_geofence.end_time.toSec()))) {\n // ROS_INFO(\"Conflict!\"); // TODO\n auto current_position = trajectories[i].waypoints[j];\n // Check also for intrusion:\n if ((j == 0)\n && in_range(current_position.z, rectified_geofence.min_altitude, rectified_geofence.max_altitude)\n && (pow(current_position.x - rectified_geofence.circle.x_center, 2) + pow(current_position.y - rectified_geofence.circle.y_center, 2) < pow(rectified_geofence.circle.radius, 2))\n ) {\n // ROS_ERROR(\"[Monitoring] Geofence intrusion! 
[i = %d]\", current_result.trajectory_index);\n current_result.closest_exit_wp.mandatory = true;\n auto exit_circle = geofence.circle;\n exit_circle.radius += operational_volume * 2.0;\n auto xy_closest_exit = calculateClosestExit(translateToPoint(current_position), exit_circle);\n // auto xy_closest_exit = calculateClosestExit(translateToPoint(current_position), rectified_geofence.circle);\n current_result.closest_exit_wp.x = xy_closest_exit.x;\n current_result.closest_exit_wp.y = xy_closest_exit.y;\n current_result.closest_exit_wp.z = current_position.z; // Suppose we want the closest exit with no changes in altuitude!\n }\n current_result.geofence_conflictive_segments.push_back(Segment(segment.point_at_time(conflict_times.first), segment.point_at_time(conflict_times.second)));\n }\n // result.push_back(current_result); // TODO: Only for debug!\n // return result; // TODO: Only for debug!\n }\n if (current_result.geofence_conflictive_segments.size() > 0) {\n result.push_back(current_result);\n }\n }\n\n return result;\n}\n\nint main(int argc, char** argv) {\n ros::init(argc, argv, \"continuous_monitoring\");\n\n ros::NodeHandle n;\n // ros::NodeHandle np(\"~\");\n ROS_INFO(\"[Monitoring] Started monitoring node!\");\n double safety_distance;\n bool just_one_threat;\n n.param(\"safetyDistance\", safety_distance, 10.0);\n n.param(\"just_one_threat\", just_one_threat, false);\n double safety_distance_sq = pow(safety_distance, 2);\n\n auto read_icao_srv_url = \"/gauss/read_icao\";\n auto read_operation_srv_url = \"/gauss/read_operation\";\n auto read_geofences_srv_url = \"/gauss/read_geofences\";\n auto tactical_srv_url = \"/gauss/new_tactical_deconfliction\";\n auto alternatives_topic_url = \"/gauss/possible_alternatives\";\n auto new_threats_srv_url = \"/gauss/new_threats\";\n auto visualization_topic_url = \"/gauss/visualize_monitoring\";\n\n ros::ServiceClient icao_client = n.serviceClient(read_icao_srv_url);\n ros::ServiceClient operation_client = 
n.serviceClient(read_operation_srv_url);\n ros::ServiceClient geofences_client = n.serviceClient(read_geofences_srv_url);\n ros::ServiceClient tactical_client = n.serviceClient(tactical_srv_url);\n ros::ServiceClient possible_alternatives_client = n.serviceClient(alternatives_topic_url);\n ros::ServiceClient new_threats_client = n.serviceClient(new_threats_srv_url);\n ros::Publisher visualization_pub = n.advertise(visualization_topic_url, 1);\n\n ROS_INFO(\"[Monitoring] Waiting for required services...\");\n ros::service::waitForService(read_icao_srv_url, -1);\n ROS_INFO(\"[Monitoring] %s: ok\", read_icao_srv_url);\n ros::service::waitForService(read_operation_srv_url, -1);\n ROS_INFO(\"[Monitoring] %s: ok\", read_operation_srv_url);\n ros::service::waitForService(read_geofences_srv_url, -1);\n ROS_INFO(\"[Monitoring] %s: ok\", read_geofences_srv_url);\n ros::service::waitForService(tactical_srv_url, -1);\n ROS_INFO(\"[Monitoring] %s: ok\", tactical_srv_url);\n ros::service::waitForService(new_threats_srv_url, -1);\n ROS_INFO(\"[Monitoring] %s: ok\", new_threats_srv_url);\n ros::Rate rate(1); // [Hz]\n while (ros::ok()) {\n gauss_msgs::ReadIcao read_icao;\n if (icao_client.call(read_icao)) {\n // ROS_INFO(\"[Monitoring] Read icao addresses... ok\");\n // std::cout << read_icao.response << '\\n';\n } else {\n ROS_ERROR(\"[Monitoring] Failed to call service: [%s]\", read_icao_srv_url);\n return 1;\n }\n\n gauss_msgs::ReadOperation read_operation;\n read_operation.request.uav_ids = read_icao.response.uav_id;\n if (operation_client.call(read_operation)) {\n // ROS_INFO(\"[Monitoring] Read operations... 
ok\");\n // std::cout << read_operation.response << '\\n';\n } else {\n ROS_ERROR(\"[Monitoring] Failed to call service: [%s]\", read_operation_srv_url);\n return 1;\n }\n\n gauss_msgs::ReadGeofences read_geofences;\n read_geofences.request.geofences_ids = read_icao.response.geofence_id;\n if (geofences_client.call(read_geofences)) {\n // ROS_INFO(\"[Monitoring] Read geofences... ok\");\n // std::cout << read_geofences.response << '\\n';\n } else {\n ROS_ERROR(\"[Monitoring] Failed to call service: [%s]\", read_geofences_srv_url);\n return 1;\n }\n\n std::map icao_to_index_map;\n std::map index_to_operation_map;\n std::map index_to_geofence_map;\n std::vector estimated_trajectories;\n std::vector operational_volumes;\n for (auto operation : read_operation.response.operation) {\n // std::cout << operation << '\\n';\n if (operation.is_started) {\n icao_to_index_map[operation.icao_address] = estimated_trajectories.size();\n index_to_operation_map[estimated_trajectories.size()] = operation;\n estimated_trajectories.push_back(operation.estimated_trajectory);\n operational_volumes.push_back(operation.operational_volume);\n }\n }\n\n std::vector geofence_results_list;\n for (auto geofence : read_geofences.response.geofences) {\n index_to_geofence_map[geofence.id] = geofence;\n // std::cout << geofence << '\\n';\n // ROS_INFO(\"_________________________\");\n // ROS_INFO(\"Checking geofence id [%d]\", geofence.id);\n GeofenceResult current_result(geofence.id);\n current_result.geo_conflictive_trajectories = checkGeofenceConflict(estimated_trajectories, operational_volumes, geofence);\n if (current_result.geo_conflictive_trajectories.size() > 0) {\n geofence_results_list.push_back(current_result);\n }\n }\n // Visualize...\n visualization_msgs::MarkerArray marker_array;\n for (int i = 0; i < geofence_results_list.size(); i++) {\n auto geofence_result = geofence_results_list[i];\n // geofence_result.geofence_id\n for (int j = 0; j < 
geofence_result.geo_conflictive_trajectories.size(); j++) {\n auto trajectory = geofence_result.geo_conflictive_trajectories[j];\n // trajectory.trajectory_index;\n auto all_conflicts = trajectory.geofence_conflictive_segments;\n auto first_conflict = getFirstSetOfContiguousSegments(all_conflicts);\n auto conflicts = first_conflict;\n int segment_id = 1e6 * i + 1e3 * j;\n std_msgs::ColorRGBA segment_color;\n segment_color.a = 1.0;\n segment_color.r = 1.0;\n if (trajectory.closest_exit_wp.mandatory) {\n // Means it is an intrusion!\n Segment way_out(trajectory.closest_exit_wp, conflicts.back().point_B);\n marker_array.markers.push_back(way_out.translateToMarker(segment_id+1e9, segment_color));\n // continue;\n }\n // else:\n for (int k = 0; k < conflicts.size(); k++) {\n segment_id += k;\n segment_color.g = 0.5;\n marker_array.markers.push_back(conflicts[k].translateToMarker(segment_id, segment_color));\n }\n }\n }\n\n std::vector loss_results_list;\n if (estimated_trajectories.size() >= 2) {\n for (int i = 0; i < estimated_trajectories.size() - 1; i++) {\n std::pair trajectories;\n trajectories.first = estimated_trajectories[i];\n for (int j = i + 1; j < estimated_trajectories.size(); j++) {\n // ROS_INFO(\"Checking trajectories: [%d, %d]\", i, j);\n trajectories.second = estimated_trajectories[j];\n double s_threshold = std::max(safety_distance_sq, pow(operational_volumes[i] + operational_volumes[j], 2));\n auto loss_conflictive_segments = checkTrajectoriesLoss(trajectories, s_threshold);\n if (loss_conflictive_segments.size() > 0) {\n LossResult loss_result(i, j);\n loss_result.loss_conflictive_segments = loss_conflictive_segments;\n loss_results_list.push_back(loss_result);\n }\n }\n }\n }\n\n std::sort(loss_results_list.begin(), loss_results_list.end(), happensBefore);\n gauss_msgs::NewThreats threats_msg;\n if (just_one_threat && (loss_results_list.size() > 0 || geofence_results_list.size() > 0)) {\n threats_msg = manageResultList(loss_results_list, 
geofence_results_list, index_to_operation_map, index_to_geofence_map);\n just_one_threat = false;\n }\n if (threats_msg.request.threats.size() > 0) {\n std::string cout_threats;\n for (auto threat : threats_msg.request.threats) {\n cout_threats = cout_threats + \" [\" + std::to_string(threat.threat_id) + \" \" + std::to_string(threat.threat_type) + \" |\";\n for (auto uav_id : threat.uav_ids) cout_threats = cout_threats + \" \" + std::to_string(uav_id);\n cout_threats = cout_threats + \"]\";\n }\n ROS_INFO_STREAM(\"[Monitoring] Threats detected: [id type | uav] \" + cout_threats);\n if (new_threats_client.call(threats_msg)) {\n // ROS_INFO(\"[Monitoring] Call tactical... ok\");\n } else {\n ROS_ERROR(\"[Monitoring] Failed to call service: [%s]\", tactical_srv_url);\n return 1;\n }\n }\n\n for (int i = 0; i < loss_results_list.size(); i++) {\n // std::cout << loss_results_list[i] << '\\n';\n marker_array.markers.push_back(translateToMarker(loss_results_list[i]));\n auto extremes = calculateExtremes(loss_results_list[i]);\n marker_array.markers.push_back(translateToMarker(extremes.first, 10 * i));\n marker_array.markers.push_back(translateToMarker(extremes.second, 10 * i + 1));\n }\n visualization_pub.publish(marker_array);\n\n ros::spinOnce();\n rate.sleep();\n }\n\n // ros::spin();\n return 0;\n}\n", "meta": {"hexsha": "a6502c506e2a36fc50ce2eff4528471bc3d9ff11", "size": 54683, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "usp_nodes/monitoring/src/continuous_monitoring.cpp", "max_stars_repo_name": "hecperleo/gauss", "max_stars_repo_head_hexsha": "20ece37af00455ee760dcef1d583300eaa347a1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-06-22T16:53:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-27T05:06:49.000Z", "max_issues_repo_path": "usp_nodes/monitoring/src/continuous_monitoring.cpp", "max_issues_repo_name": "hecperleo/gauss", "max_issues_repo_head_hexsha": 
"20ece37af00455ee760dcef1d583300eaa347a1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-09-10T10:24:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-10T10:24:20.000Z", "max_forks_repo_path": "usp_nodes/monitoring/src/continuous_monitoring.cpp", "max_forks_repo_name": "hecperleo/gauss", "max_forks_repo_head_hexsha": "20ece37af00455ee760dcef1d583300eaa347a1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2021-03-25T12:50:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T11:14:21.000Z", "avg_line_length": 48.6936776492, "max_line_length": 288, "alphanum_fraction": 0.6493974361, "num_tokens": 13536, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.746138993030751, "lm_q2_score": 0.6688802669716107, "lm_q1q2_score": 0.49907764885633754}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"mandelbrot.hpp\"\n\nusing namespace tg;\n\nconst int IMG_X = 12800;\nconst int IMG_Y ((IMG_X/(NUM_SPES*4))*3*NUM_SPES); // Approximately 4:3 aspect ratio with number of rows multiple of NUM_SPES\n\nconst int MAX_ITERATIONS = 1000;\nconst char* OUTPUT_FILE = \"mandelbrot_set.png\";\nconst float GAMMA_EXPONENT = 3.5f;\n\nconst int UNROLL_FACTOR = 8; // MUST BE A DIVISOR OF IMG_X!\nBOOST_STATIC_ASSERT(IMG_X % UNROLL_FACTOR == 0);\n\ntypedef TaskGraph mandel_tg;\n\nint main(int argc, char* argv[]) {\n int threads;\n const Options options = getOptions(\"mandelbrot\", argc, argv);\n\n if (options.threads < 1 || options.threads > NUM_SPES)\n {\n std::cerr << \"Please select a number of threads between 1 and \" << NUM_SPES << \" (inclusive).\" << std::endl;\n exit(EXIT_FAILURE);\n }\n else if (IMG_Y % options.threads != 0)\n {\n std::cerr << \"Please choose a number of threads that is a factor of the image height (\" << IMG_Y << \").\" << 
std::endl;\n exit(EXIT_FAILURE);\n }\n else\n {\n threads = options.threads;\n }\n\n FILE *output_file;\n boost::ptr_vector taskGraphs; \n TaskFarm tFarm;\n\n mandel_tg T; \n taskgraph(mandel_tg, T, tuple2(y_pos, line)) {\n tVar(float, x);\n tVar(float, y);\n tVar(float, x0);\n tVar(float, y0);\n tVar(float, xtemp);\n tVar(int, x_pos);\n tVar(int, iteration);\n tVar(float, pixel);\n \n y0 = (y_pos * (3.0f / IMG_Y)) - 1.5f;\n \n // Iterate over the scanline\n tFor (x_pos, 0, IMG_X - 1) {\n x0 = (x_pos * (3.5f / IMG_X)) - 2.5f;\n \n // Unroll the loop\n for (int i = 0; i < UNROLL_FACTOR; ++i) {\n x = x0;\n y = y0;\n iteration = 0;\n \n // Test if this location is in the set\n tWhile ((x*x + y*y) < 4 && iteration < MAX_ITERATIONS) {\n xtemp = (x*x) - (y*y) + x0;\n y = 2*x*y + y0;\n x = xtemp;\n iteration+=1;\n }\n\n // Calculate and set the value of the pixel\n tIf(iteration == MAX_ITERATIONS)\n pixel = 0.0f;\n tElse\n pixel = 8.0f * (iteration + 1.0f - tLogf(tLogf(tSqrtf(x*x + y*y))) / std::log(2.0f)) / MAX_ITERATIONS;\n\n line[x_pos] = tPowf(pixel, (1.0f / GAMMA_EXPONENT)) * 255.0f;\n x_pos+=1;\n }\n x_pos-=1;\n }\n }\n\n T.compile(tg::SPU_GCC, false);\n\n for(int index=0; index(NULL));\n exit(1);\n }\n \n png_init_io(png_ptr, output_file); \n png_set_IHDR(png_ptr, info_ptr, IMG_X, IMG_Y, 8,\n PNG_COLOR_TYPE_PALETTE,\n PNG_INTERLACE_NONE,\n PNG_COMPRESSION_TYPE_DEFAULT,\n PNG_FILTER_TYPE_DEFAULT);\n png_set_gamma(png_ptr, 0.5, 0.45455);\n\n std::vector palette(generateBlueYellowColourMap());\n png_set_PLTE(png_ptr, info_ptr, &palette[0], palette.size());\n png_write_info(png_ptr, info_ptr);\n \n std::vector scanlines(threads); \n\n for (int spe = 0; spe < threads; ++spe)\n scanlines[spe] = static_cast(spu_malloc(IMG_X));\n\n for (int line = 0; line < IMG_Y; line+=threads)\n {\n std::vector lineNumbers(threads);\n\n for(int spe=0; spe(scanlines[spe]);\n png_write_row(png_ptr, png_row_ptr);\n }\n }\n \n for (int spe = 0; spe < threads; ++spe)\n 
spu_free(scanlines[spe]);\n \n png_write_end(png_ptr, info_ptr);\n png_destroy_write_struct(&png_ptr, &info_ptr);\n\n exit(EXIT_SUCCESS);\n}\n\n", "meta": {"hexsha": "b61e607814935cc4dd5fe67de5682ceec0dcc284", "size": 4572, "ext": "cc", "lang": "C++", "max_stars_repo_path": "examples/cell/mandelbrot/mandelbrot.cc", "max_stars_repo_name": "paulhjkelly/taskgraph-metaprogramming", "max_stars_repo_head_hexsha": "54c4e2806a97bec555a90784ab4cf0880660bf89", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2020-04-11T21:30:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-04T16:16:09.000Z", "max_issues_repo_path": "examples/cell/mandelbrot/mandelbrot.cc", "max_issues_repo_name": "paulhjkelly/taskgraph-metaprogramming", "max_issues_repo_head_hexsha": "54c4e2806a97bec555a90784ab4cf0880660bf89", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/cell/mandelbrot/mandelbrot.cc", "max_forks_repo_name": "paulhjkelly/taskgraph-metaprogramming", "max_forks_repo_head_hexsha": "54c4e2806a97bec555a90784ab4cf0880660bf89", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5421686747, "max_line_length": 125, "alphanum_fraction": 0.6194225722, "num_tokens": 1399, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7461389930307512, "lm_q2_score": 0.6688802669716106, "lm_q1q2_score": 0.49907764885633754}} {"text": "#ifndef UTIL_RANDOM_HPP_\n#define UTIL_RANDOM_HPP_\n\n#include \n#include \n\n#include \n\n#include \"SpinMutex.hpp\"\n#include \"ThreadIndexManager.hpp\"\n\nnamespace util\n{\n\nclass Random\n{\nprivate:\n\tstatic std::vector random;\n\tstatic util::SpinMutex mutex;\n\npublic:\n\tRandom() : engine(std::random_device{}()) {}\n\tstd::default_random_engine engine;\n\n\tstatic void seed(unsigned int seed, int thread = util::ThreadIndexManager::getLocalId())\n\t{\n\t\tgetEngine(thread).seed(seed);\n\t}\n\t\n\tstatic int nextInt(int min, int max, int thread = util::ThreadIndexManager::getLocalId())\n\t{\n\t\tstd::uniform_int_distribution<> dist(min, max);\n\t\treturn dist(getEngine(thread));\n\t}\n\n\tstatic unsigned long long nextULL(int thread = util::ThreadIndexManager::getLocalId())\n\t{\n\t\tstd::uniform_int_distribution dist(\n\t\t\tstd::numeric_limits::min(),\n\t\t\tstd::numeric_limits::max()\n\t\t);\n\t\treturn dist(getEngine(thread));\n\t}\n\n\tstatic float nextReal(int thread = util::ThreadIndexManager::getLocalId())\n\t{\n\t\tstd::uniform_real_distribution<> dist;\n\t\treturn dist(getEngine(thread));\n\t}\n\n\tstatic std::default_random_engine& getEngine(int thread = util::ThreadIndexManager::getLocalId())\n\t{\n\t\tif(random.size() <= thread){\n std::lock_guard lock(mutex);\n\t\t\tif(random.size() <= thread){\n random.resize(thread + 1);\n }\n }\n\n\t\treturn random[thread].engine;\n\t}\n};\n\n} // end of namespace util\n\n#endif\n", "meta": {"hexsha": "c71add2a2849696ca47a1a91ae932611194527a8", "size": 1506, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "solver/modules/util/include/util/Random.hpp", "max_stars_repo_name": "taiheioki/procon2014_ut", "max_stars_repo_head_hexsha": "8199ff0a54220f1a0c51acece377f65b64db4863", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": 
"2021-04-14T06:41:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-29T01:56:08.000Z", "max_issues_repo_path": "solver/modules/util/include/util/Random.hpp", "max_issues_repo_name": "taiheioki/procon2014_ut", "max_issues_repo_head_hexsha": "8199ff0a54220f1a0c51acece377f65b64db4863", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "solver/modules/util/include/util/Random.hpp", "max_forks_repo_name": "taiheioki/procon2014_ut", "max_forks_repo_head_hexsha": "8199ff0a54220f1a0c51acece377f65b64db4863", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.4776119403, "max_line_length": 98, "alphanum_fraction": 0.7011952191, "num_tokens": 351, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7461390043208003, "lm_q2_score": 0.6688802537704064, "lm_q1q2_score": 0.4990776465580952}} {"text": "/*\nMIT License\n\nCopyright (c) 2018 Bastien Durix\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n */\n\n#ifndef _VECTOR3_H_\n#define _VECTOR3_H_\n\n#include \n#include \n\n/**\n * @brief Class representing a 3D vector\n */\ntemplate\nclass Vector3\n{\n public:\n\t\t/**\n\t\t * @brief Evaluation of the vector value\n\t\t */\n template\n static inline Eigen::Vector3d eval(const T &p, Args... args)\n\t\t{\n\t\t\treturn Eigen::Vector3d(X0::eval(p,args...),X1::eval(p,args...),X2::eval(p,args...));\n\t\t};\n\n\t\t/**\n\t\t * @brief Writing of the vector value\n\t\t */\n template\n static std::string write(Args... args)\n {\n return \"(\" + X0::write(args...) + \",\" + X1::write(args...) + \",\" + X2::write(args...) + \")\";\n };\n\t\t\n\t\t/**\n\t\t * @brief Struct recursively applying the modifier F\n\t\t */\n\t\ttemplate typename F>\n\t\tstruct apply_rec\n\t\t{\n\t\t\tusing type = Vector3::type,\n\t\t\t\t\t\t\t\t typename X1::template apply_rec::type,\n\t\t\t\t\t\t\t\t typename X2::template apply_rec::type>;\n\t\t};\n\n\t\t/**\n\t\t * @brief Importance order\n\t\t */\n\t\tstatic const unsigned int outerOrder = 3;\n\n\t\t/**\n\t\t * @brief Importance order\n\t\t */\n\t\tstatic const unsigned int innerOrder = 0;\n};\n\n/**\n * @brief Derivative of the parameter with respect to an argument\n */\ntemplate\nstruct Der,A>\n{\n using type = Vector3::type,typename Der::type,typename Der::type>;\n};\n\n/**\n * @brief Simplification of the 3D vector\n */\ntemplate\nstruct Simp >\n{\n using type = Vector3::type,typename Simp::type,typename Simp::type>;\n};\n\n#endif //_VECTOR3_H_\n\n", "meta": {"hexsha": "53e14aa9b3fd8128eb4f49f3b3fccdba3d7b5db1", "size": 2864, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/lib/basic_operands/Vector3.hpp", "max_stars_repo_name": "Ibujah/derivative", 
"max_stars_repo_head_hexsha": "7f1187323e23ea84ce719b7b539390b0d58940e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/lib/basic_operands/Vector3.hpp", "max_issues_repo_name": "Ibujah/derivative", "max_issues_repo_head_hexsha": "7f1187323e23ea84ce719b7b539390b0d58940e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib/basic_operands/Vector3.hpp", "max_forks_repo_name": "Ibujah/derivative", "max_forks_repo_head_hexsha": "7f1187323e23ea84ce719b7b539390b0d58940e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2244897959, "max_line_length": 104, "alphanum_fraction": 0.6951815642, "num_tokens": 711, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7461390043208003, "lm_q2_score": 0.668880247169804, "lm_q1q2_score": 0.49907764163312834}} {"text": "//\n// Created by Dominik Krupke, http://krupke.cc on 10/7/17.\n//\n\n#include \n#include \n#include \"min_vertex_cover.h\"\n\nTEST(Library, IsValidVertexCover) {\n using Graph = boost::adjacency_list;\n Graph graph{6};\n boost::add_edge(0, 3, graph);\n boost::add_edge(0, 4, graph);\n boost::add_edge(0, 5, graph);\n boost::add_edge(1, 3, graph);\n boost::add_edge(1, 4, graph);\n boost::add_edge(1, 5, graph);\n boost::add_edge(2, 3, graph);\n boost::add_edge(2, 4, graph);\n boost::add_edge(2, 5, graph);\n\n std::vector::vertex_descriptor> vertex_cover_1;\n vertex_cover_1.push_back(0);\n vertex_cover_1.push_back(1);\n std::vector::vertex_descriptor> vertex_cover_2;\n vertex_cover_2.push_back(0);\n vertex_cover_2.push_back(1);\n vertex_cover_2.push_back(2);\n\n ASSERT_FALSE(bipartvc::is_valid_vertex_cover(graph, vertex_cover_1));\n ASSERT_TRUE(bipartvc::is_valid_vertex_cover(graph, vertex_cover_2));\n}\n\nTEST(Library, MinVertexCover) {\n using Graph = boost::adjacency_list;\n Graph graph{7};\n boost::add_edge(0, 3, graph);\n boost::add_edge(0, 4, graph);\n boost::add_edge(0, 5, graph);\n boost::add_edge(1, 3, graph);\n boost::add_edge(1, 4, graph);\n boost::add_edge(1, 5, graph);\n boost::add_edge(2, 3, graph);\n boost::add_edge(2, 4, graph);\n boost::add_edge(2, 5, graph);\n boost::add_edge(2, 6, graph);\n\n auto partition_classifier = [](boost::graph_traits::vertex_descriptor v) -> bipartvc::Partition {\n if (v < 3) {\n return bipartvc::Partition::A;\n } else {\n return bipartvc::Partition::B;\n }\n };\n auto vertex_cover = bipartvc::get_minimal_vertex_cover(graph, partition_classifier);\n\n ASSERT_EQ(vertex_cover.size(), 3);\n ASSERT_TRUE(bipartvc::is_valid_vertex_cover(graph, vertex_cover));\n\n //swap partitions (A is treated differently than B by the algorithm so we should check this)\n auto complementary_partition_classifier = 
[](boost::graph_traits::vertex_descriptor v) -> bipartvc::Partition {\n if (v < 3) {\n return bipartvc::Partition::B;\n } else {\n return bipartvc::Partition::A;\n }\n };\n auto vertex_cover_2 = bipartvc::get_minimal_vertex_cover(graph, complementary_partition_classifier);\n\n ASSERT_EQ(vertex_cover_2.size(), 3);\n ASSERT_TRUE(bipartvc::is_valid_vertex_cover(graph, vertex_cover_2));\n\n //and with automatic partition detection.\n auto vertex_cover_3 = bipartvc::get_minimal_vertex_cover(graph);\n ASSERT_EQ(vertex_cover_3.size(), 3);\n ASSERT_TRUE(bipartvc::is_valid_vertex_cover(graph, vertex_cover_3));\n}\n\nTEST(MinVertexCover, Star) {\n using Graph = boost::adjacency_list;\n Graph graph{7};\n boost::add_edge(0, 2, graph);\n boost::add_edge(0, 3, graph);\n boost::add_edge(0, 4, graph);\n boost::add_edge(0, 5, graph);\n boost::add_edge(0, 6, graph);\n boost::add_edge(1, 2, graph);\n boost::add_edge(1, 3, graph);\n boost::add_edge(1, 4, graph);\n boost::add_edge(1, 5, graph);\n boost::add_edge(1, 6, graph);\n auto partition_classifier = [](boost::graph_traits::vertex_descriptor v) -> bipartvc::Partition {\n if (v < 2) {\n return bipartvc::Partition::A;\n } else {\n return bipartvc::Partition::B;\n }\n };\n auto vertex_cover = bipartvc::get_minimal_vertex_cover(graph, partition_classifier);\n\n ASSERT_EQ(vertex_cover.size(), 2);\n ASSERT_TRUE(bipartvc::is_valid_vertex_cover(graph, vertex_cover));\n\n //swap partitions (A is treated differently than B by the algorithm so we should check this)\n auto complementary_partition_classifier = [](boost::graph_traits::vertex_descriptor v) -> bipartvc::Partition {\n if (v < 2) {\n return bipartvc::Partition::B;\n } else {\n return bipartvc::Partition::A;\n }\n };\n auto vertex_cover_2 = bipartvc::get_minimal_vertex_cover(graph, complementary_partition_classifier);\n\n ASSERT_EQ(vertex_cover_2.size(), 2);\n ASSERT_TRUE(bipartvc::is_valid_vertex_cover(graph, vertex_cover_2));\n\n //and with automatic partition detection.\n auto 
vertex_cover_3 = bipartvc::get_minimal_vertex_cover(graph);\n ASSERT_EQ(vertex_cover_3.size(), 2);\n ASSERT_TRUE(bipartvc::is_valid_vertex_cover(graph, vertex_cover_3));\n\n}\n\nTEST(MinVertexCover, ConnectedStar) {\n using Graph = boost::adjacency_list;\n Graph graph{12};\n boost::add_edge(0, 2, graph);\n boost::add_edge(0, 3, graph);\n boost::add_edge(0, 4, graph);\n boost::add_edge(0, 5, graph);\n boost::add_edge(0, 6, graph);\n boost::add_edge(1, 7, graph);\n boost::add_edge(1, 8, graph);\n boost::add_edge(1, 9, graph);\n boost::add_edge(1, 10, graph);\n boost::add_edge(1, 11, graph);\n boost::add_edge(0, 1, graph);\n\n auto vertex_cover_3 = bipartvc::get_minimal_vertex_cover(graph);\n ASSERT_EQ(vertex_cover_3.size(), 2);\n ASSERT_TRUE(bipartvc::is_valid_vertex_cover(graph, vertex_cover_3));\n\n}\n\nTEST(MinVertexCover, EvenCycle) {\n using Graph = boost::adjacency_list;\n Graph graph{12};\n boost::add_edge(0, 1, graph);\n boost::add_edge(1, 2, graph);\n boost::add_edge(2, 3, graph);\n boost::add_edge(3, 4, graph);\n boost::add_edge(4, 5, graph);\n boost::add_edge(5, 6, graph);\n boost::add_edge(6, 7, graph);\n boost::add_edge(7, 8, graph);\n boost::add_edge(8, 9, graph);\n boost::add_edge(9, 10, graph);\n boost::add_edge(10, 11, graph);\n boost::add_edge(11, 0, graph);\n\n auto vertex_cover_3 = bipartvc::get_minimal_vertex_cover(graph);\n ASSERT_EQ(vertex_cover_3.size(), 6);\n ASSERT_TRUE(bipartvc::is_valid_vertex_cover(graph, vertex_cover_3));\n\n}\n\n", "meta": {"hexsha": "78893369a7116c44e74e94763b3db343d46f5826", "size": 5717, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "min_vertex_cover_gtest.cpp", "max_stars_repo_name": "d-krupke/bipartite_vertex_cover", "max_stars_repo_head_hexsha": "4ad5bbf1d8490e131d093cd12addaac8f531345f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "min_vertex_cover_gtest.cpp", 
"max_issues_repo_name": "d-krupke/bipartite_vertex_cover", "max_issues_repo_head_hexsha": "4ad5bbf1d8490e131d093cd12addaac8f531345f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "min_vertex_cover_gtest.cpp", "max_forks_repo_name": "d-krupke/bipartite_vertex_cover", "max_forks_repo_head_hexsha": "4ad5bbf1d8490e131d093cd12addaac8f531345f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2335329341, "max_line_length": 120, "alphanum_fraction": 0.7091131712, "num_tokens": 1753, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7461389930307512, "lm_q2_score": 0.6688802537704063, "lm_q1q2_score": 0.49907763900640423}} {"text": "#pragma once\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"globals.hpp\"\n\n//the Test_Suite namespace contains functions that aid in\n//verifying the correctness of gcd algorithms as well as\n//testing the performance of generic gcd routines\nnamespace Test_Suite{\n\t\n\t//these functions make sure that the gcd algorithms are correct\n\ttemplate bool Check_Algorithm_Validity(std::vector&(*fun_)(std::vector&));\n\ttemplate bool Check_Answer_Validity(std::vector const& vec);\n\t\n\t//utility\n\ttemplate boost::random::uniform_int_distribution Get_Specified_Bit_Length_Distribution(int bit_length);\n\t\n};\n\n//these functions make sure that the gcd algorithms are correct\ntemplate bool Test_Suite::Check_Algorithm_Validity(std::vector&(*fun_)(std::vector&)){\n\tint fail_rate=0;\n\t\n\tboost::random::mt19937 gen(std::time(0));\n\tDefault_Test_Element a;\n\tstd::vector vec;\n\tauto dist = Test_Suite::Get_Specified_Bit_Length_Distribution(30);\n\t\n\t//fill up the vector\n\tfor (int i = 0; i < 100; ++i){\n\t\ta = 
dist(gen);\n\t\tvec.push_back(a);\n\t}\n\t\n\t//std::cout << \"no: \"; for (auto it: vec){std::cout << it << \",\";}std::cout << std::endl << std::endl;\n\t\n\tfun_(vec);\n\tfail_rate += Test_Suite::Check_Answer_Validity(vec);\n\t\n\tbool failed = false;\n\tif (fail_rate > 0){failed = true;}\n\treturn failed;\n}\ntemplate bool Test_Suite::Check_Answer_Validity(std::vector const& vec){\n\t\n\tDefault_Test_Element min = 0;\n\tif (!vec.empty()){\n\t\tmin = vec[0];\n\t}\n\t\n\tfor (auto const& it: vec){\n\t\tif (min > it){\n\t\t\tstd::cerr << \"list not sorted.\" << std::endl;\n\t\t\treturn false;\n\t\t}\n\t}\n\t\n\t//std::cout << \"yes: \"; for (auto it: vec){std::cout << it << \",\";}std::cout << std::endl << std::endl;\n\t\n\treturn true;\n}\n\n//utility\ntemplate boost::random::uniform_int_distribution Test_Suite::Get_Specified_Bit_Length_Distribution(int bit_length){\n\tIntegerType min = pow(2,bit_length-1);\n\tIntegerType max = pow(2,bit_length)-1;\n\treturn boost::random::uniform_int_distribution(min,max);\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "meta": {"hexsha": "73fa36bc11e1276036004304b76d6b20a16c0d9e", "size": 2456, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "source/code/scratch/old_repos/edinboro/CSCI-385/sort-algorithms/code/test_suite.hpp", "max_stars_repo_name": "luxe/CodeLang-compiler", "max_stars_repo_head_hexsha": "78837d90bdd09c4b5aabbf0586a5d8f8f0c1e76a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33.0, "max_stars_repo_stars_event_min_datetime": "2019-05-30T07:43:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T13:12:32.000Z", "max_issues_repo_path": "source/code/scratch/old_repos/edinboro/CSCI-385/sort-algorithms/code/test_suite.hpp", "max_issues_repo_name": "luxe/CodeLang-compiler", "max_issues_repo_head_hexsha": "78837d90bdd09c4b5aabbf0586a5d8f8f0c1e76a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 371.0, "max_issues_repo_issues_event_min_datetime": "2019-05-16T15:23:50.000Z", 
"max_issues_repo_issues_event_max_datetime": "2021-09-04T15:45:27.000Z", "max_forks_repo_path": "source/code/scratch/old_repos/edinboro/CSCI-385/sort-algorithms/code/test_suite.hpp", "max_forks_repo_name": "UniLang/compiler", "max_forks_repo_head_hexsha": "c338ee92994600af801033a37dfb2f1a0c9ca897", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2019-08-22T17:37:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-07T07:15:32.000Z", "avg_line_length": 26.989010989, "max_line_length": 161, "alphanum_fraction": 0.7333061889, "num_tokens": 630, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7461389817407016, "lm_q2_score": 0.6688802537704064, "lm_q1q2_score": 0.4990776314547131}} {"text": "// Author(s): Jeroen Keiren\n// Copyright: see the accompanying file COPYING or copy at\n// https://github.com/mCRL2org/mCRL2/blob/master/COPYING\n//\n// Distributed under the Boost Software License, Version 1.0.\n// (See accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n//\n// / \\file bag_test.cpp\n// / \\brief Basic regression test for bag expressions.\n\n#define BOOST_TEST_MODULE bag_test\n#include \n\n#include \"mcrl2/data/bag.h\"\n#include \"mcrl2/data/parse.h\"\n#include \"mcrl2/data/rewriter.h\"\n\n\nusing namespace mcrl2;\nusing namespace mcrl2::data;\n\n// test whether parsing s returns an expression matching predicate p.\n// Furthermore check whether the expression does not satisfy q.\ntemplate \nvoid test_data_expression(const std::string& s, const variable_vector& v, Predicate p, NegativePredicate q)\n{\n std::cerr << \"testing data expression \" << s << std::endl;\n data_expression e = parse_data_expression(s, v);\n std::cerr << \"parsed expression \" << e << \"\\n\";\n BOOST_CHECK(p(e));\n BOOST_CHECK(!q(e));\n}\n\nvoid test_expression(const std::string& evaluate, const std::string& expected, data::rewriter r)\n{\n data_expression d1 = 
parse_data_expression(evaluate);\n data_expression d2 = parse_data_expression(expected);\n if (r(d1)!=r(d2))\n {\n std::cerr << \"Evaluating: \" << evaluate << \"\\n\";\n std::cerr << \"Result: \" << d1 << \"\\n\";\n std::cerr << \"Expected result: \" << expected << \"\\n\";\n BOOST_CHECK(r(d1) == r(d2));\n std::cerr << \"------------------------------------------------------\\n\";\n }\n}\n\n\nvoid bag_expression_test()\n{\n data::data_specification specification;\n\n specification.add_context_sort(sort_bag::bag(sort_pos::pos()));\n specification.add_context_sort(sort_bag::bag(sort_bool::bool_()));\n\n data::rewriter normaliser(specification);\n\n variable_vector v;\n v.push_back(parse_variable(\"b:Bag(Nat)\"));\n\n BOOST_CHECK(sort_bag::is_bag(sort_bag::bag(sort_nat::nat())));\n BOOST_CHECK(!sort_bag::is_bag(sort_nat::nat()));\n\n test_data_expression(\"{x : Nat | x}\", v, sort_bag::is_constructor_application, sort_bag::is_in_application);\n test_data_expression(\"1 in b\", v, sort_bag::is_in_application, sort_bag::is_union_application);\n test_data_expression(\"{:} + b\", v, sort_bag::is_union_application, sort_bag::is_intersection_application);\n test_data_expression(\"(({:} + b) - {20:1}) * {40:5}\", v, sort_bag::is_intersection_application, is_less_application);\n test_data_expression(\"{10:count(20,b)} < b\", v, is_less_application, sort_bag::is_bag_comprehension_application);\n test_data_expression(\"b <= {20:2}\", v, is_less_equal_application, sort_bag::is_set2bag_application);\n test_data_expression(\"Set2Bag({20,30,40})\", v, sort_bag::is_set2bag_application, sort_bag::is_union_application);\n test_data_expression(\"{20:2} + Set2Bag({20,30,40})\", v, sort_bag::is_union_application, is_less_equal_application);\n test_data_expression(\"b <= Set2Bag({20,30,40})\", v, is_less_equal_application, sort_bag::is_constructor_application);\n\n test_data_expression(\"b <= {20:2} + Set2Bag({20,30,40})\", v, is_less_equal_application, 
sort_bag::is_constructor_application);\n\n data_expression e = parse_data_expression(\"{20:1}\", v);\n BOOST_CHECK(sort_fbag::is_cons_application(normaliser(e)));\n\n e = parse_data_expression(\"{20:4, 30:3, 40:2}\", v);\n BOOST_CHECK(sort_fbag::is_cons_application(normaliser(e)));\n\n e = parse_data_expression(\"{10:count(20,b)}\", v);\n BOOST_CHECK(sort_fbag::is_cinsert_application(normaliser(e)));\n\n // Chect the operation == on bags\n test_expression(\"{:} == ({true:2} - {true:2})\",\"true\",normaliser); // {true}-{true} is a trick to type {:} == {:}. \n test_expression(\"{:} == {false:2}\", \"false\",normaliser);\n test_expression(\"{:} == {true:2}\", \"false\",normaliser);\n test_expression(\"{true:2} == {true:3}\", \"false\",normaliser);\n test_expression(\"{false:2} == {false:2}\", \"true\",normaliser);\n test_expression(\"{true:2} == {false:2, true:2}\", \"false\",normaliser);\n test_expression(\"{false:2, true:2} == {false:2}\", \"false\",normaliser);\n test_expression(\"{false:2, true:2} == {true:2, false:2}\", \"true\",normaliser);\n\n // Check the operation < on bags.\n test_expression(\"{:} < ({true:2} - {true:2})\",\"false\",normaliser); // {true}-{true} is a trick to type {:} == {:}. 
\n test_expression(\"{true:2} < {false:4}\", \"false\",normaliser);\n test_expression(\"{true:2} < {false:2}\", \"false\",normaliser);\n test_expression(\"{false:2} < {true:4}\", \"false\",normaliser);\n test_expression(\"{true:2} < {true:3}\", \"true\",normaliser);\n test_expression(\"{false:2} < {false:1}\", \"false\",normaliser);\n test_expression(\"{true:2} < {false:4, true:2}\", \"true\",normaliser);\n test_expression(\"{false:2} < {false:1, true:2}\", \"false\",normaliser);\n test_expression(\"{true:2} < {true:4, false:2}\", \"true\",normaliser);\n test_expression(\"{false:2} < {true:4, false:2}\", \"true\",normaliser);\n test_expression(\"{true:2, false:4} < {true:4}\", \"false\",normaliser);\n test_expression(\"{true:2, false:4} < {false:2}\", \"false\",normaliser);\n test_expression(\"{false:2, true:4} < {true:2}\", \"false\",normaliser);\n test_expression(\"{false:2, true:4} < {false:5}\", \"false\",normaliser);\n test_expression(\"{true:2, false:4} < {false:2, true:2}\", \"false\",normaliser);\n test_expression(\"{true:2, false:4} < {true:2, false:2}\", \"false\",normaliser);\n test_expression(\"{false:2, true:1} < {false:2, true:2}\", \"true\",normaliser);\n test_expression(\"{false:2, true:4} < {true:7, false:2}\", \"true\",normaliser);\n test_expression(\"{false:1, false:1} < {false:2}\", \"false\",normaliser);\n \n // Check the operation <= on bags.\n test_expression(\"{:} <= ({true:2}-{true:2})\",\"true\",normaliser); // {true} - {true} is a trick to type {:} == {:}.\n test_expression(\"{true:2} <= {false:2}\", \"false\",normaliser);\n test_expression(\"{false:2} <= {true:2}\", \"false\",normaliser);\n test_expression(\"{true:2} <= {true:2}\", \"true\",normaliser);\n test_expression(\"{true:3} <= {true:2}\", \"false\",normaliser);\n test_expression(\"{false:2} <= {false:1}\", \"false\",normaliser);\n test_expression(\"{false:2} <= {false:7}\", \"true\",normaliser);\n test_expression(\"{true:2} <= {false:2, true:2}\", \"true\",normaliser);\n 
test_expression(\"{false:2} <= {false:4, true:2}\", \"true\",normaliser);\n test_expression(\"{true:2} <= {true:1, false:2}\", \"false\",normaliser);\n test_expression(\"{false:2} <= {true:2, false:2}\", \"true\",normaliser);\n test_expression(\"{true:2, false:2} <= {true:3}\", \"false\",normaliser);\n test_expression(\"{true:2, false:2} <= {false:1}\", \"false\",normaliser);\n test_expression(\"{false:2, true:2} <= {true:2}\", \"false\",normaliser);\n test_expression(\"{false:2, true:2} <= {false:2}\", \"false\",normaliser);\n test_expression(\"{true:2, false:2} <= {false:2, true:2}\", \"true\",normaliser);\n test_expression(\"{true:2, false:2} <= {true:2, false:2}\", \"true\",normaliser);\n test_expression(\"{false:2, true:2} <= {false:2, true:2}\", \"true\",normaliser);\n test_expression(\"{false:2, true:2} <= {true:2, false:2}\", \"true\",normaliser);\n test_expression(\"{false:1, false:2} <= {false:2}\", \"false\", normaliser);\n\n // Test the operation - on bags.\n test_expression(\"{true:0} - {:}\", \"{true:0}\", normaliser);\n test_expression(\"{:} - {true:1}\", \"{true:0}\", normaliser);\n test_expression(\"{true:1} - {:}\", \"{true:1}\", normaliser);\n test_expression(\"{:} - {true:2}\", \"{true:0}\", normaliser);\n test_expression(\"{true:2} - {:}\", \"{true:2}\", normaliser);\n test_expression(\"{true:1} - {true:1}\", \"{true:0}\", normaliser);\n test_expression(\"{true:2} - {true:1}\", \"{true:1}\", normaliser);\n test_expression(\"{true:1} - {true:2}\", \"{true:0}\", normaliser);\n test_expression(\"{true:1} - {false:1}\", \"{true:1}\", normaliser);\n test_expression(\"{false:1} - {true:1}\", \"{false:1}\", normaliser);\n test_expression(\"{true:2} - {false:1}\", \"{true:2}\", normaliser);\n test_expression(\"{false:2} - {true:1}\", \"{false:2}\", normaliser);\n test_expression(\"{true:1} - {false:2}\", \"{true:1}\", normaliser);\n test_expression(\"{false:1} - {true:2}\", \"{false:1}\", normaliser);\n test_expression(\"{true:1, false:1} - 
{false:1}\", \"{true:1}\", normaliser);\n test_expression(\"{true:1, false:1} - {true:1}\", \"{false:1}\", normaliser);\n test_expression(\"{true:1, false:1} - {true:1, false:1}\", \"{true:0}\",\n normaliser);\n test_expression(\"{true:1, false:1} - {false:1, true:1}\", \"{true:0}\",\n normaliser);\n test_expression(\"{true:2, false:2} - {false:1}\", \"{true:2, false:1}\",\n normaliser);\n test_expression(\"{true:2, false:2} - {true:1}\", \"{true:1, false:2}\",\n normaliser);\n test_expression(\"{true:2, false:2} - {true:1,false:1}\", \"{true:1, false:1}\",\n normaliser);\n test_expression(\"{true:2, false:2} - {false:1,true:1}\", \"{true:1, false:1}\",\n normaliser);\n test_expression(\"{true:2, false:2} - {false:2}\", \"{true:2}\", normaliser);\n test_expression(\"{true:2, false:2} - {false:2}\", \"{true:2}\", normaliser);\n}\n\nBOOST_AUTO_TEST_CASE(test_main)\n{\n bag_expression_test();\n}\n\n", "meta": {"hexsha": "b446bbba1acb871615c13358007e3b56a5a07d07", "size": 9175, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libraries/data/test/bag_test.cpp", "max_stars_repo_name": "sdrees/mCRL2", "max_stars_repo_head_hexsha": "bbda4c85022bc21cfa3eab3aafd07e60e89dee2d", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libraries/data/test/bag_test.cpp", "max_issues_repo_name": "sdrees/mCRL2", "max_issues_repo_head_hexsha": "bbda4c85022bc21cfa3eab3aafd07e60e89dee2d", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libraries/data/test/bag_test.cpp", "max_forks_repo_name": "sdrees/mCRL2", "max_forks_repo_head_hexsha": "bbda4c85022bc21cfa3eab3aafd07e60e89dee2d", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, 
"max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.8361581921, "max_line_length": 145, "alphanum_fraction": 0.6563487738, "num_tokens": 2735, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7461389817407016, "lm_q2_score": 0.6688802537704063, "lm_q1q2_score": 0.499077631454713}} {"text": "/* \n// Copyright 2018 University of Liege\n// \n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n// \n// http://www.apache.org/licenses/LICENSE-2.0\n// \n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//\n// Authors:\n// - Adrien Crovato\n*/\n\n//// Compressible module (field solver)\n// Compute field variables and field sources\n//\n// I/O:\n// - Minf: freestream Mach number\n// - vInf: freestream velocity vector\n// - bPan: (network of) body panels (structure)\n// - fPan: field panels (structure)\n// - sp: sub-panels (structure)\n// - b2fAIC: body to field AIC (structure)\n// - f2fAIC: field to field AIC (structure)\n// - spAIC: body to field AIC for subpanels (structure)\n\n#include \n#include \n#include \"solve_field.h\"\n#include \"compute_fVars.h\"\n\nusing namespace std;\nusing namespace Eigen;\n\n#define GAMMA 1.4\n#define CMU 1.0\n#define M_C 1.0\n\nvoid solve_field(double Minf, Vector3d &vInf, Network &bPan, Field &fPan, Subpanel &sp,\n Body_AIC &b2fAIC, Field2field_AIC &f2fAIC, Subpanel_AIC &spAIC) {\n\n //// Begin\n cout << \"Computing field variables... 
\" << flush;\n\n //// Field variables\n compute_fVars(Minf, vInf, bPan, fPan, sp, b2fAIC, f2fAIC, spAIC);\n\n //// Field sources\n // Density gradient\n for (int i = 0; i < fPan.nE; i++) {\n int f = fPan.eIdx(i);\n double dU, dV, dW; // Variables for residual\n\n // X-derivative\n double fb=0, ff=0, fb2=0, ff2=0;\n if (fPan.fbdMap(f,0)) {\n fb = (fPan.rho(f) - fPan.rho(f - 1)) / fPan.deltaX;\n fb2 = (fPan.U(f,0) - fPan.U(f - 1,0)) / fPan.deltaX;\n }\n if (fPan.fbdMap(f,1)) {\n ff = (fPan.rho(f + 1) - fPan.rho(f)) / fPan.deltaX;\n ff2 = (fPan.U(f + 1,0) - fPan.U(f,0)) / fPan.deltaX;\n }\n if (fPan.fbdMap(f,0) && fPan.fbdMap(f,1)) {\n fPan.dRho(f,0) = 0.5*(fb+ff);\n dU = 0.5*(fb2+ff2);\n }\n else {\n fPan.dRho(f,0) = fb+ff;\n dU = fb2+ff2;\n }\n\n // Y-derivative\n fb=0, ff=0, fb2=0, ff2=0;\n if (fPan.fbdMap(f,2)) {\n fb = (fPan.rho(f) - fPan.rho(f - fPan.nX*fPan.nZ)) / fPan.deltaY;\n fb2 = (fPan.U(f,1) - fPan.U(f - fPan.nX*fPan.nZ,1)) / fPan.deltaY;\n }\n if (fPan.fbdMap(f,3)) {\n ff = (fPan.rho(f + fPan.nX*fPan.nZ) - fPan.rho(f)) / fPan.deltaY;\n ff2 = (fPan.U(f + fPan.nX*fPan.nZ,1) - fPan.U(f,1)) / fPan.deltaY;\n }\n if (fPan.fbdMap(f,2) && fPan.fbdMap(f,3)) {\n fPan.dRho(f,1) = 0.5*(fb+ff);\n dV = 0.5*(fb2+ff2);\n }\n else {\n fPan.dRho(f,1) = fb+ff;\n dV = fb2+ff2;\n }\n\n // Z-derivative\n fb=0, ff=0, fb2=0, ff2=0;\n if (fPan.fbdMap(f,4)) {\n fb = (fPan.rho(f) - fPan.rho(f - fPan.nX)) / fPan.deltaZ;\n fb2 = (fPan.U(f,2) - fPan.U(f - fPan.nX,2)) / fPan.deltaZ;\n }\n if (fPan.fbdMap(f,5)) {\n ff = (fPan.rho(f + fPan.nX) - fPan.rho(f)) / fPan.deltaZ;\n ff2 = (fPan.U(f + fPan.nX,2) - fPan.U(f,2)) / fPan.deltaZ;\n }\n if (fPan.fbdMap(f,4) && fPan.fbdMap(f,5)) {\n fPan.dRho(f,2) = 0.5*(fb+ff);\n dW = 0.5*(fb2+ff2);\n }\n else {\n fPan.dRho(f,2) = fb+ff;\n dW = fb2+ff2;\n }\n\n // Field source\n fPan.sigma(f) = -1 / (fPan.rho(f)) * fPan.U.row(f).dot(fPan.dRho.row(f));\n // Residual\n fPan.epsilon(f) = dU + dV + dW - fPan.sigma(f);\n }\n\n // TODO 1) Artificial density 
(pros: physical; cons: does not work on MG or RG/MG)\n // TODO 2) Artificial viscosity (pros: works on RG/MG; cons: cut through surface, not physical)\n // TODO NB) With current form, x-upwinding gives same results as s-upwinding.\n\n //// Artificial viscosity\n for (int i = 0; i < fPan.nE; i++) {\n int f = fPan.eIdx(i);\n double mu, deltaSigmaX, deltaSigmaY, deltaSigmaZ; // Variables for artificial viscosity\n\n if (fPan.M(f) > M_C) {\n mu = CMU * (1 - M_C * M_C / (fPan.M(f) * fPan.M(f)));\n\n // X-contribution\n if (fPan.U(f,0) > 0 && fPan.fbdMap(f,0))\n deltaSigmaX = fPan.sigma(f) - fPan.sigma(f - 1);\n else if (fPan.U(f,0) < 0 && fPan.fbdMap(f,1))\n deltaSigmaX = fPan.sigma(f + 1) - fPan.sigma(f);\n else\n deltaSigmaX = 0;\n // Y-contribution\n if (fPan.U(f,1) > 0 && fPan.fbdMap(f,2))\n deltaSigmaY = fPan.sigma(f) - fPan.sigma(f - fPan.nX*fPan.nZ);\n else if (fPan.U(f,1) < 0 && fPan.fbdMap(f,3))\n deltaSigmaY = fPan.sigma(f + fPan.nX*fPan.nZ) - fPan.sigma(f);\n else\n deltaSigmaY = 0;\n // Z-contribution\n if (fPan.U(f,2) > 0 && fPan.fbdMap(f,4))\n deltaSigmaZ = fPan.sigma(f) - fPan.sigma(f - fPan.nX);\n else if (fPan.U(f,2) < 0 && fPan.fbdMap(f,5))\n deltaSigmaZ = fPan.sigma(f + fPan.nX) - fPan.sigma(f);\n else\n deltaSigmaZ = 0;\n\n fPan.sigma(f) -= mu / fPan.U.row(f).norm() *\n (fPan.U(f, 0) * deltaSigmaX + fPan.U(f, 1) * deltaSigmaY + fPan.U(f, 2) * deltaSigmaZ);\n }\n }\n\n cout << \"Done!\" << endl;\n cout << \"Max. 
Mach number: \" << fPan.M.maxCoeff() << endl;\n cout << \"Field sources strength: \" << fPan.sigma.norm() << endl << endl;\n}", "meta": {"hexsha": "90b09dd09b4f115ec1937dc6be1401f29a4b4017", "size": 5852, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/solve_field.cpp", "max_stars_repo_name": "acrovato/aero", "max_stars_repo_head_hexsha": "310e6840670f5a39ca015c61c9090f123da8cfd6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2020-11-16T15:24:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T09:36:09.000Z", "max_issues_repo_path": "src/solve_field.cpp", "max_issues_repo_name": "acrovato/aero", "max_issues_repo_head_hexsha": "310e6840670f5a39ca015c61c9090f123da8cfd6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/solve_field.cpp", "max_forks_repo_name": "acrovato/aero", "max_forks_repo_head_hexsha": "310e6840670f5a39ca015c61c9090f123da8cfd6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4666666667, "max_line_length": 118, "alphanum_fraction": 0.5251196172, "num_tokens": 1977, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7745833945721304, "lm_q2_score": 0.6442251201477016, "lm_q1q2_score": 0.49900608043264527}} {"text": "\ufeff#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n\r\n// Defining special types\r\ntypedef std::complex dcomp;\r\ntypedef std::vector dvec;\r\ntypedef std::vector< std::vector > ddvec;\r\ntypedef std::vector ivec;\r\ntypedef std::vector< std::vector > iivec;\r\ntypedef std::vector< std::complex > dcvec;\r\ntypedef std::vector< std::vector > > ddcvec;\r\n\r\n/* --------------------------------------------------------------------\r\n\r\n\t\tFUNCTIONS\r\n\r\n-----------------------------------------------------------------------*/\r\n\r\ninline const double pi()\r\n{\r\n\t// Set teh value fo pi\r\n\tconst double pi = std::atan(1) * 4;\r\n\treturn pi;\r\n}\r\n\r\n/* --------------------------------------------------------------------\r\n\t\tFROM MICROSECONDS TO HH:MM:SS:MS\r\n-----------------------------------------------------------------------*/\r\nstd::tuple ms_to_time(long long ms)\r\n{\r\n\t// Defining the variables\r\n\tlong long s, m, h;\r\n\r\n\t// Calculating the time in hour:minutes:seconds:microseconds\r\n\ts = ms / 1000000;\r\n\tms = ms % 1000000;\r\n\tm = s / 60;\r\n\ts = s % 60;\r\n\th = m / 60;\r\n\tm = m % 60;\r\n\r\n\treturn { ms,s,m,h };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tPRINT in HH:MM:SS:MS FORMAT\r\n-----------------------------------------------------------------------*/\r\nvoid time_print(long long ms, long long s, long long m, long long h)\r\n{\r\n\r\n\t// Print the time\r\n\tif (h < 10)\r\n\t{\r\n\t\tstd::cout << \"0\" << h << \":\";\r\n\t}\r\n\telse\r\n\t{\r\n\t\tstd::cout << \"\" << h << \":\";\r\n\t}\r\n\tif (m < 10)\r\n\t{\r\n\t\tstd::cout << \"0\" << m << \":\";\r\n\t}\r\n\telse\r\n\t{\r\n\t\tstd::cout << \"\" << m << \":\";\r\n\t}\r\n\tif (s 
< 10)\r\n\t{\r\n\t\tstd::cout << \"0\" << s << \":\" << ms;\r\n\t}\r\n\telse\r\n\t{\r\n\t\tstd::cout << \"\" << s << \":\" << ms;\r\n\t}\r\n\r\n\treturn;\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tCHI FROM Z\r\n-----------------------------------------------------------------------*/\r\ninline dcomp chi_from_z(dcomp z, dcomp z1, dcomp z2, double L, double mu)\r\n{\r\n\t// Defining the variables\r\n\tdcomp z0, Z, chi;\r\n\r\n\t// Calculating the chi from a z value\r\n\tz0 = 0.5 * (z1 + z2);\r\n\tZ = exp(dcomp(0, -1) * mu) * 2.0 * (z - z0) / L;\r\n\tchi = Z + sqrt(Z - 1.0) * sqrt(Z + 1.0);\r\n\r\n\treturn chi;\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tZ FROM CHI\r\n-----------------------------------------------------------------------*/\r\ninline dcomp z_from_chi(dcomp chi, dcomp z1, dcomp z2, double L, double mu)\r\n{\r\n\t// Defining the variables\r\n\tdcomp z, z0, Z;\r\n\r\n\t// Calculating the z from a chi value\r\n\tz0 = 0.5 * (z1 + z2);\r\n\tZ = 0.5 * (chi + 1.0 / chi);\r\n\tz = 0.5 * L * Z * exp(dcomp(0, 1) * mu) + z0;\r\n\r\n\treturn z;\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tANGEL CHANGE\r\n-----------------------------------------------------------------------*/\r\ninline double angel_change(dcomp z, dcomp z1, dcomp z2)\r\n{\r\n\t// Defining the variables\r\n\tdouble u, v, eta;\r\n\r\n\t// Calculating the angels\r\n\tu = arg(z - z1);\r\n\tv = arg(z1 - z2);\r\n\r\n\t// Correct to angel of 0 to 2pi\r\n\tif (u < 0)\r\n\t{\r\n\t\tu += 2 * pi();\r\n\t}\r\n\tif (v < 0)\r\n\t{\r\n\t\tv += 2 * pi();\r\n\t}\r\n\r\n\t// Calculate angel between the vectors\r\n\teta = abs(u - v);\r\n\r\n\t// Correct to angel of 0 to pi\r\n\tif (eta > pi())\r\n\t{\r\n\t\teta -= 2 * pi();\r\n\t}\r\n\r\n\teta = abs(eta);\r\n\r\n\treturn eta;\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tFIND INTERSECTION 
POINT\r\n-----------------------------------------------------------------------*/\r\ninline std::tuple intersection_point(dcomp z1, dcomp z2, dcomp z3, dcomp z4)\r\n{\r\n\t// Defining the variables\r\n\tdcomp zint, Z1, Z2;\r\n\tint int_check = 0;\r\n\r\n\tif (z1 != z3 || z2 != z4)\r\n\t{\r\n\t\t// Calculating the intersection point\r\n\t\tzint = ((conj(z2 - z1) * z1 - (z2 - z1) * conj(z1)) * (z4 - z3) - (conj(z4 - z3) * z3 - (z4 - z3) * conj(z3)) * (z2 - z1)) / ((z4 - z3) * conj(z2 - z1) - (z2 - z1) * conj(z4 - z3));\r\n\r\n\t\t// Check if intersection is on the lines\r\n\t\tZ1 = (zint - .5 * (z1 + z2)) / (.5 * (z1 - z2));\r\n\t\tZ2 = (zint - .5 * (z3 + z4)) / (.5 * (z3 - z4));\r\n\r\n\t\tif (real(Z1 * conj(Z1)) < 1.0)\r\n\t\t{\r\n\t\t\tif (real(Z2 * conj(Z2)) < 1.0)\r\n\t\t\t{\r\n\t\t\t\tint_check = 1;\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t}\r\n\r\n\treturn { zint, int_check };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tGET DELTA\r\n-----------------------------------------------------------------------*/\r\ninline dcomp get_delta(ddcvec ab, int nab, int m, ivec pos_z1, ivec pos_z2)\r\n{\r\n\t// Defintion of variables\r\n\tdcomp delta;\r\n\r\n\tdelta = 0;\r\n\tfor (int ii = 0; ii < nab; ii++)\r\n\t{\r\n\t\t// Adding the jump for the z1 elements\r\n\t\tif (pos_z1[ii] == 1)\r\n\t\t{\r\n\t\t\tfor (int jj = 0; jj < m; jj++)\r\n\t\t\t{\r\n\t\t\t\tdelta += ab[ii][jj]* pow(-1, jj);\r\n\t\t\t}\r\n\t\t}\r\n\t\t// Adding the jump for the z2 elements\r\n\t\tif (pos_z2[ii] == 1)\r\n\t\t{\r\n\t\t\tfor (int jj = 0; jj < m; jj++)\r\n\t\t\t{\r\n\t\t\t\tdelta -= ab[ii][jj];\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t}\r\n\r\n\treturn delta;\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tTAU UNIFORM STRESS\r\n-----------------------------------------------------------------------*/\r\ninline std::tuple tau_uni(double sigma_11inf)\r\n{\r\n\t// Defining the variables\r\n\tdcomp tau_11, tau_12, phi, psi;\r\n\r\n\t// Get 
the phi and psi\r\n\tphi = -0.5 * sigma_11inf;\r\n\tpsi = -0.5 * sigma_11inf;\r\n\r\n\t// calculating the tau\r\n\ttau_11 = -phi - psi;\r\n\ttau_12 = -phi - phi;\r\n\r\n\treturn { tau_11, tau_12 };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tTAU CRACK\r\n-----------------------------------------------------------------------*/\r\ninline std::tuple tau_crack(dcomp z, dcomp z1, dcomp z2, double L, double mu, int m, dcvec a)\r\n{\r\n\t// Defining the variables\r\n\tdcomp chi, chi_bar, Z, chi_pow;\r\n\tdcomp dphi, dphi_bar, ddphi, dpsi;\r\n\tdcomp tau_11, tau_12, S1, L_frac;\r\n\r\n\t// Getting the chi - and Z - coordinates\r\n\tchi = chi_from_z(z, z1, z2, L, mu);\r\n\tchi_bar = conj(chi);\r\n\tZ = exp(dcomp(0, -1) * mu) * 2.0 * (z - 0.5 * (z1 + z2)) / L;\r\n\r\n\t// Calculating the series\r\n\tdphi = 0;\r\n\tdphi_bar = 0;\r\n\tddphi = 0;\r\n\tdpsi = 0;\r\n\tchi_pow = chi * chi - 1.0;\r\n\tfor (int ii = 0; ii < m; ii++)\r\n\t{\r\n\t\tdouble n = ii + 1.0;\r\n\t\tdcomp beta_n = a[ii] * n;\r\n\t\tdcomp chipow = pow(chi, (1.0 - n)) / chi_pow;\r\n\t\tdphi += conj(beta_n) * chipow;\r\n\t\tdphi_bar += beta_n * conj(chipow);\r\n\t\tddphi -= conj(beta_n) * (pow(chi, (2.0 - n)) / (chi_pow * chi_pow * chi_pow)) * ((n + 1.0) * chi * chi - n + 1.0);\r\n\t\tdpsi -= beta_n * chipow;\r\n\t}\r\n\r\n\t// Multiplying the constants\r\n\tL_frac = (4.0 / L) * exp(dcomp(0, -1) * mu);\r\n\tdphi *= L_frac;\r\n\tdphi_bar *= conj(L_frac);\r\n\tddphi *= (16.0 / (L * L)) * exp(dcomp(0, -2) * mu);\r\n\tdpsi *= L_frac;\r\n\r\n\t// Calcualting tau\r\n\ttau_11 = -0.5 * L * (Z - conj(Z)) * ddphi - exp(dcomp(0, -1) * mu) * (dphi + dpsi);\r\n\ttau_12 = -exp(dcomp(0, 1) * mu) * dphi - exp(dcomp(0, -1) * mu) * dphi_bar;\r\n\r\n\treturn { tau_11, tau_12 };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tTAU TOTAL\r\n-----------------------------------------------------------------------*/\r\ninline std::tuple tau_total(dcomp 
z, double sigma_11inf, dcvec z1, dcvec z2, dvec L, dvec mu, int ma, int na, ddcvec a, int m_not_a)\r\n{\r\n\t// Defining the variables\r\n\tdcomp tau_11, tau_12;\r\n\r\n\t// Add the unifrom stress field\r\n\tstd::tie(tau_11, tau_12) = tau_uni(sigma_11inf);\r\n\r\n\t// Add the analytic element for a crack\r\n\tif (na > 0)\r\n\t{\r\n\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t{\r\n\t\t\tif (ii != m_not_a)\r\n\t\t\t{\r\n\t\t\t\tdcomp tau_11c, tau_12c;\r\n\t\t\t\tstd::tie(tau_11c, tau_12c) = tau_crack(z, z1[ii], z2[ii], L[ii], mu[ii], ma, a[ii]);\r\n\t\t\t\ttau_11 += tau_11c;\r\n\t\t\t\ttau_12 += tau_12c;\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\treturn { tau_11, tau_12 };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tT TOTAL\r\n-----------------------------------------------------------------------*/\r\ninline dcomp T_total(dcomp z, double sigma_11inf, dcvec z1a, dcvec z2a, dvec La, dvec mua, int ma, int na, ddcvec a, int m_not_a, int m_is_a)\r\n{\r\n\t// Defining the variables\r\n\tdcomp tau_11, tau_12, T;\r\n\r\n\t// Get teh taus\r\n\tstd::tie(tau_11, tau_12) = tau_total(z, sigma_11inf, z1a, z2a, La, mua, ma, na, a, m_not_a);\r\n\r\n\t// Calculate the tractions\r\n\tT = dcomp(0, -.5) * (tau_11 * exp(dcomp(0, 2) * mua[m_is_a]) - tau_12);\r\n\r\n\treturn { T };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tSOLVE CRACK ANALYTIC ELEMENT A (BETA IN PAPER)\r\n-----------------------------------------------------------------------*/\r\ninline dcvec AE_crack_solver(Eigen::VectorXd T_s, Eigen::VectorXd T_n, Eigen::MatrixXd A, int ma, int Na, dvec pa, double sigma_11inf, dcvec z1a, dcvec z2a, dvec La, dvec mua, int na, ddcvec a_in, dcvec zint, ivec int_check, int m_is_a)\r\n{\r\n\t// Defining the variables\r\n\tdcvec a(ma);\r\n\tEigen::VectorXd b1(ma);\r\n\tEigen::VectorXd b2(ma);\r\n\r\n\t// Count number of intersections\r\n\tint int_count = std::accumulate(int_check.begin(), int_check.end(), 
0.0);\r\n\r\n\t// Check if the crack has any intersection\r\n\tif (int_count == 0)\r\n\t{\r\n\t\t// Solving the linear system (without intersections)\r\n\t\tb1 = A.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(T_s);\r\n\t\tb2 = A.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(T_n);\r\n\t}\r\n\telse\r\n\t{\r\n\t\t// Define new matrices for A and T and assign the pre-calculated values\r\n\t\tEigen::MatrixXd A_int_s(Na + int_count * 3, ma);\r\n\t\tEigen::MatrixXd A_int_n(Na + int_count, ma);\r\n\t\tEigen::VectorXd T_s_int(Na + int_count * 3);\r\n\t\tEigen::VectorXd T_n_int(Na + int_count);\r\n\t\t#pragma omp parallel for default(none) shared(A_int_s, A_int_n, A, T_n_int, T_n, T_s_int, T_s)\r\n\t\tfor (int ii = 0; ii < Na; ii++)\r\n\t\t{\r\n\t\t\tfor (int jj = 0; jj < ma; jj++)\r\n\t\t\t{\r\n\t\t\t\t/*A_int_re(ii, jj) = A(ii, jj);\r\n\t\t\t\tA_int_im(ii, jj) = A(ii, jj);*/\r\n\t\t\t\tA_int_s(ii, jj) = A(ii, jj);\r\n\t\t\t\tA_int_n(ii, jj) = A(ii, jj);\r\n\t\t\t}\r\n\t\t\tT_s_int(ii) = T_s(ii);\r\n\t\t\tT_n_int(ii) = T_n(ii);\r\n\t\t}\r\n\t\tint cnt_s = 0;\r\n\t\tint cnt_n = 0;\r\n\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t{\r\n\t\t\t// Check if element ii has any intersections\r\n\t\t\tif (int_check[ii] == 1)\r\n\t\t\t{\r\n\t\t\t\t// Define variables\r\n\t\t\t\tdcomp chia, tau11, chi_pow, L_frac, T_temp;\r\n\t\t\t\tdouble thetaa;\r\n\r\n\t\t\t\t// Compute the intersection angel in the chi-plane\r\n\t\t\t\tchia = chi_from_z(zint[ii], z1a[m_is_a], z2a[m_is_a], La[m_is_a], mua[m_is_a]);\r\n\t\t\t\tthetaa = imag(log(chia));\r\n\r\n\t\t\t\t// Pre-calcualte terms\r\n\t\t\t\tchi_pow = chia /( chia * chia - 1.0 );\r\n\t\t\t\tL_frac = (dcomp(0,8.0) / La[m_is_a]) * exp(dcomp(0, -2) * mua[m_is_a]);\r\n\r\n\t\t\t\t// Add the intersection to the A matrix\r\n\t\t\t\tfor (int jj = 0; jj < ma; jj++)\r\n\t\t\t\t{\r\n\t\t\t\t\tdouble n = jj + 1.0;\r\n\t\t\t\t\t// Add a control point at the intersection - tau^11 condition\r\n\t\t\t\t\tA_int_s(Na + cnt_s, jj) = real( 
chi_pow * L_frac * n * pow(chia, -n) );\r\n\t\t\t\t\tA_int_s(Na + (cnt_s + 1), jj) = imag( chi_pow * L_frac * n * pow(chia, -n) );\r\n\t\t\t\t\t\r\n\t\t\t\t\t// Add a control point at the intersection - traction condition\r\n\t\t\t\t\tA_int_s(Na + (cnt_s + 2), jj) = n * sin(n * thetaa);\r\n\t\t\t\t\tA_int_n(Na + cnt_n, jj) = n * sin(n * thetaa);\r\n\t\t\t\t}\r\n\r\n\t\t\t\t// Calculate the taus at the intersection\r\n\t\t\t\tdcomp tau_11, tau_12;\r\n\t\t\t\tstd::tie(tau_11, tau_12) = tau_total(zint[ii], sigma_11inf, z1a, z2a, La, mua, ma, na, a_in, m_is_a);\r\n\r\n\t\t\t\t// Add the intersection to the T_s vector\r\n\t\t\t\tT_s_int(Na + cnt_s) = -real(tau_11);\r\n\t\t\t\tT_s_int(Na + (cnt_s + 1)) = -imag(tau_11);\r\n\r\n\t\t\t\t// Calculate the traction at the control point at the intersection\r\n\t\t\t\tT_temp = T_total(zint[ii], sigma_11inf, z1a, z2a, La, mua, ma, na, a_in, m_is_a, m_is_a);\r\n\r\n\t\t\t\t// Add the intersection to the T vectors\r\n\t\t\t\tT_s_int(Na + (cnt_s + 2)) = real(T_temp) * 0.5 * La[m_is_a] * sin(thetaa);\r\n\t\t\t\tT_n_int(Na + cnt_n) = (pa[ii] + imag(T_temp)) * -0.5 * La[m_is_a] * sin(thetaa);\r\n\r\n\t\t\t\t// Add step to loop count\r\n\t\t\t\tcnt_s += 3;\r\n\t\t\t\tcnt_n += 1;\r\n\t\t\t}\r\n\t\t\t\r\n\t\t}\r\n\r\n\t\t// Solving the linear system (with intersections)\r\n\t\tb1 = A_int_s.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(T_s_int);\r\n\t\tb2 = A_int_n.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(T_n_int);\r\n\t}\r\n\t// Assign to a\r\n\tfor (int ii = 0; ii < ma; ii++)\r\n\t{\r\n\t\ta[ii] = dcomp(b2[ii], b1[ii]);\r\n\t}\r\n\r\n\r\n\treturn { a };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tITERATOR\r\n-----------------------------------------------------------------------*/\r\ninline ddcvec iterator(double cond, int ITR, int Na, dvec pa, double sigma_11inf, dcvec z1a, dcvec z2a, dvec La, dvec mua, int ma, int na)\r\n{\r\n\t// Defining the variables\r\n\tdcomp 
tau_11, tau_12, T;\r\n\tEigen::MatrixXd A(Na, ma);\r\n\tdvec theta_a(Na);\r\n\tddvec term(na, dvec(Na));\r\n\tddcvec za(na, dcvec(Na));\r\n\tddcvec a(na, dcvec(ma));\r\n\tddcvec zint(na, dcvec(na));\r\n\tiivec int_check(na, ivec(na));\r\n\tivec int_count(na);\r\n\tdouble theta_0a, delthetaa;\r\n\r\n\t// Print the conditions\r\n\tstd::cout << std::scientific;\r\n\tstd::cout << \"Solver for \" << na << \" analytical element cracks with \" << ma << \" coefficients at \" << Na << \" integration points.\" << std::endl;\r\n\tstd::cout << \"Iterations break after: error < \" << cond << \" or iterations > \" << ITR << \".\" << std::endl << std::endl;\r\n\tstd::cout << \"\tError:\" << \"\t\tIteration:\" << std::endl;\r\n\r\n\r\n\t// Assigning variables\r\n\ttheta_0a = pi() / (Na);\r\n\tdelthetaa = (pi() - 2.5*theta_0a) / (Na - 1.0);\r\n\r\n\tif (na > 0)\r\n\t{\r\n\t\t// Find the intersection points\r\n\t\t#pragma omp parallel for default(none) shared(zint, int_check, int_count, z1a, z2a)\r\n\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t{\r\n\t\t\tfor (int jj = 0; jj < na; jj++)\r\n\t\t\t{\r\n\t\t\t\tstd::tie(zint[ii][jj], int_check[ii][jj]) = intersection_point(z1a[ii], z2a[ii], z1a[jj], z2a[jj]);\r\n\t\t\t}\r\n\t\t\tint_count[ii] = std::accumulate(int_check[ii].begin(), int_check[ii].end(), 0.0);\r\n\t\t}\r\n\r\n\t\t// Calculating the A, a theta and term matrices\r\n\t\t#pragma omp parallel for default(none) shared(theta_a, theta_0a, delthetaa)\r\n\t\tfor (int ii = 0; ii < Na; ii++)\r\n\t\t{\r\n\t\t\ttheta_a[ii] = theta_0a + ii * delthetaa;\r\n\t\t}\r\n\r\n\t\t//#pragma omp parallel for default(none) shared(A, za, term, z1a, z2a, La, mua, theta_a)\r\n\t\tfor (int ii = 0; ii < Na; ii++)\r\n\t\t{\r\n\t\t\tfor (int mm = 0; mm < ma; mm++)\r\n\t\t\t{\r\n\t\t\t\tA(ii, mm) = (mm + 1.0) * sin((mm + 1.0) * theta_a[ii]);\r\n\t\t\t}\r\n\t\t\tfor (int jj = 0; jj < na; jj++)\r\n\t\t\t{\r\n\t\t\t\tdcomp chi;\r\n\t\t\t\tchi = exp(dcomp(0, 1) * theta_a[ii]);\r\n\t\t\t\tza[jj][ii] = 
z_from_chi(chi, z1a[jj], z2a[jj], La[jj], mua[jj]);\r\n\t\t\t\tterm[jj][ii] = -0.5 * La[jj] * sin(theta_a[ii]);\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t}\r\n\r\n\t// Sovle the a (betas in paper)\r\n\tdouble error = 1;\r\n\tddcvec a_old(na, dcvec(ma, 0));\r\n\tdouble error_a;\r\n\tint NIT = 0;\r\n\twhile ( error > cond && NIT < ITR )\r\n\t{\r\n\t\t\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t\t\t{\r\n\t\t\t\t\tEigen::VectorXd T_s(Na);\r\n\t\t\t\t\tEigen::VectorXd T_n(Na);\r\n\t\t\t\t\t#pragma omp parallel for default(none) shared(T_s, T_n, za, sigma_11inf, z1a, z2a, La, mua, ma, na, a)\r\n\t\t\t\t\tfor (int jj = 0; jj < Na; jj++)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tdcomp T;\r\n\t\t\t\t\t\tT = T_total(za[ii][jj], sigma_11inf, z1a, z2a, La, mua, ma, na, a, ii, ii);\r\n\t\t\t\t\t\tT_s(jj) = -real(T) * term[ii][jj];\r\n\t\t\t\t\t\tT_n(jj) = (pa[ii] + imag(T)) * term[ii][jj];\r\n\t\t\t\t\t}\r\n\t\t\t\t\tdcomp tau_11, tau_12;\r\n\t\t\t\t\ta[ii] = AE_crack_solver(T_s, T_n, A, ma, Na, pa, sigma_11inf, z1a, z2a, La, mua, na, a, zint[ii], int_check[ii], ii);\r\n\t\t\t\t\tstd::tie(tau_11, tau_12) = tau_total(zint[0][1], sigma_11inf, z1a, z2a, La, mua, ma, na, a, -1);\r\n\t\t\t\t}\r\n\r\n\t\t// Calcualte the error\r\n\t\terror_a = 0;\r\n\t\tif (na > 0)\r\n\t\t{\r\n\t\t\tdvec dela(na);\r\n\t\t\t#pragma omp parallel for default(none) shared(dela, a, error_a)\r\n\t\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t\t{\r\n\t\t\t\tdvec dela_temp(ma);\r\n\t\t\t\tfor (int jj = 0; jj < ma; jj++)\r\n\t\t\t\t{\r\n\t\t\t\t\tdela_temp[jj] = abs(a[ii][jj] - a_old[ii][jj]);\r\n\t\t\t\t}\r\n\t\t\t\tdela[ii] = *max_element(dela_temp.begin(), dela_temp.end());\r\n\t\t\t}\r\n\t\t\terror_a = *max_element(dela.begin(), dela.end());\r\n\t\t}\r\n\t\tdvec error_vec{ error_a };\r\n\t\terror = *max_element(error_vec.begin(), error_vec.end());\r\n\r\n\t\tNIT += 1;\r\n\t\ta_old = a;\r\n\r\n\t\tstd::cout << std::scientific;\r\n\t\tstd::cout << \"\t\" << error << \"\t\" << NIT << std::endl;\r\n\r\n\t}\r\n\r\n\treturn { a 
};\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tSTRESS TOTAL\r\n-----------------------------------------------------------------------*/\r\ninline std::tuple sigma_total(dcomp z, double sigma_11inf, dcvec z1a, dcvec z2a, dvec La, dvec mua, int ma, int na, ddcvec a)\r\n{\r\n\t// Defining the variables\r\n\tdcomp S1, S2, tau_11, tau_12;\r\n\tdouble sigma_11, sigma_22, sigma_12;\r\n\r\n\t// Calculating the tau\r\n\tstd::tie(tau_11, tau_12) = tau_total(z, sigma_11inf, z1a, z2a, La, mua, ma, na, a, -1);\r\n\r\n\t// Calculate the sigmas\r\n\tS1 = .5 * (tau_11 + tau_12);\r\n\tS2 = .5 * (-tau_11 + tau_12);\r\n\tsigma_11 = real(S1);\r\n\tsigma_22 = real(S2);\r\n\tsigma_12 = -imag(S1);\r\n\r\n\treturn { sigma_11, sigma_22, sigma_12 };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tSTRESS FIELD\r\n-----------------------------------------------------------------------*/\r\nstd::tuple stress_field(double xfrom, double xto, double yfrom, double yto, int Nx, int Ny, double sigma_11inf, dcvec z1a, dcvec z2a, dvec La, dvec mua, int ma, int na, ddcvec a)\r\n{\r\n\t// Defining the variables\r\n\tddvec grid_11(Nx, dvec(Ny));\r\n\tddvec grid_22(Nx, dvec(Ny));\r\n\tddvec grid_12(Nx, dvec(Ny));\r\n\tdouble dx;\r\n\tdouble dy;\r\n\tdvec x_vec(Nx);\r\n\tdvec y_vec(Ny);\r\n\r\n\t// Calcualte the sigma grids\r\n\tdx = (xto - xfrom) / (Nx - 1.0);\r\n\tdy = (yto - yfrom) / (Ny - 1.0);\r\n\t#pragma omp parallel for default(none) shared(grid_11, grid_22, grid_12, x_vec, y_vec, sigma_11inf, z1a, z2a, La, mua, ma, na, a)\r\n\tfor (int ii = 0; ii < Nx; ii++)\r\n\t{\r\n\t\tfor (int jj = Ny; jj--;)\r\n\t\t{\r\n\t\t\tx_vec[ii] = xfrom + ii * dx;\r\n\t\t\ty_vec[jj] = yfrom + jj * dy;\r\n\t\t\tstd::tie(grid_11[ii][jj], grid_22[ii][jj], grid_12[ii][jj]) = sigma_total(dcomp(x_vec[ii], y_vec[jj]), sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t}\r\n\t}\r\n\treturn { x_vec, y_vec, grid_11, grid_22, grid_12 };\r\n}\r\n/* 
--------------------------------------------------------------------\r\n\t\tPRINCIPAL STRESSES\r\n-----------------------------------------------------------------------*/\r\nstd::tuple principal_sigma(dcomp z, double sigma_11inf, dcvec z1a, dcvec z2a, dvec La, dvec mua, int ma, int na, ddcvec a)\r\n{\r\n\t// Defining the variables\r\n\tdouble sigma_1;\r\n\tdouble sigma_2;\r\n\tdouble theta_p;\r\n\tdouble frac1, frac2, sqrt1;\r\n\tdcomp S1, S2, tau_11, tau_12;\r\n\tdouble sigma_11, sigma_22, sigma_12;\r\n\r\n\t// Calculating the tau\r\n\tstd::tie(tau_11, tau_12) = tau_total(z, sigma_11inf, z1a, z2a, La, mua, ma, na, a, -1);\r\n\r\n\t// Calculate the sigmas\r\n\tS1 = .5 * (tau_11 + tau_12);\r\n\tS2 = .5 * (-tau_11 + tau_12);\r\n\tsigma_11 = real(S1);\r\n\tsigma_22 = real(S2);\r\n\tsigma_12 = -imag(S1);\r\n\r\n\t// Calculating the terms\r\n\tfrac1 = (sigma_11 + sigma_22) / 2.0;\r\n\tfrac2 = (sigma_11 - sigma_22) / 2.0;\r\n\tsqrt1 = sqrt(frac2 * frac2 + sigma_12 * sigma_12);\r\n\r\n\t// Calculating the principal stresses and the angel of sigma\r\n\tsigma_1 = frac1 + sqrt1;\r\n\tsigma_2 = frac1 - sqrt1;\r\n\r\n\t// Calcuating the angel theta_p\r\n\ttheta_p = -0.5 * imag(log(tau_11));\r\n\r\n\t// Changing the absolut sigma is sigma_2 > sigma_1 (TURNED OFF)\r\n\t/*if (abs(sigma_2) > abs(sigma_1))\r\n\t{\r\n\t\tsigma_1 = frac1 - sqrt1;\r\n\t\tsigma_2 = frac1 + sqrt1;\r\n\t\ttheta_p = -0.5*imag(log(tau_11)) + pi() / 2;\r\n\t}\r\n\t*/\r\n\r\n\r\n\r\n\treturn { sigma_1, sigma_2, theta_p };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tPRINCIPAL STRESSES PLOT\r\n-----------------------------------------------------------------------*/\r\nstd::tuple principal_sigma_plt(double sigma_11, double sigma_22, double sigma_12)\r\n{\r\n\t// Defining the variables\r\n\tdouble sigma_1;\r\n\tdouble sigma_2;\r\n\tdouble theta_p;\r\n\tdouble frac1, frac2, sqrt1;\r\n\tdcomp tau_11;\r\n\r\n\t// Calculating the tau\r\n\ttau_11 = sigma_11 - sigma_22 - 
dcomp(0, 2) * sigma_12;\r\n\r\n\t// Calculating the terms\r\n\tfrac1 = (sigma_11 + sigma_22) / 2.0;\r\n\tfrac2 = (sigma_11 - sigma_22) / 2.0;\r\n\tsqrt1 = sqrt(frac2 * frac2 + sigma_12 * sigma_12);\r\n\r\n\t// Calculating the principal stresses and the angel of sigma\r\n\tsigma_1 = frac1 + sqrt1;\r\n\tsigma_2 = frac1 - sqrt1;\r\n\r\n\t// Calcuating the angel theta_p\r\n\ttheta_p = -0.5 * imag(log(tau_11));\r\n\r\n\t// Changing the absolut sigma is sigma_2 > sigma_1 (TURNED OFF)\r\n\t/*if (abs(sigma_2) > abs(sigma_1))\r\n\t{\r\n\t\tsigma_1 = frac1 - sqrt1;\r\n\t\tsigma_2 = frac1 + sqrt1;\r\n\t\ttheta_p = -0.5*imag(log(tau_11)) + pi() / 2;\r\n\t}\r\n\t*/\r\n\r\n\r\n\r\n\treturn { sigma_1, sigma_2, theta_p };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tPRINCIPAL STRESS FIELDS\r\n-----------------------------------------------------------------------*/\r\nstd::tuple principal_stress_field(int Nx, int Ny, ddvec grid_11, ddvec grid_22, ddvec grid_12)\r\n{\r\n\t// Defining the variables\r\n\tddvec grid_1(Nx, dvec(Ny));\r\n\tddvec grid_2(Nx, dvec(Ny));\r\n\tddvec grid_tp(Nx, dvec(Ny));\r\n\r\n\t// Calculate teh principal stresses from the stress feilds\r\n\t#pragma omp parallel for default(none) shared(grid_1, grid_2, grid_tp, grid_11, grid_22, grid_12)\r\n\tfor (int ii = 0; ii < Nx; ii++)\r\n\t{\r\n\t\tfor (int jj = Ny; jj--;)\r\n\t\t{\r\n\t\t\tstd::tie(grid_1[ii][jj], grid_2[ii][jj], grid_tp[ii][jj]) = principal_sigma_plt(grid_11[ii][jj], grid_22[ii][jj], grid_12[ii][jj]);\r\n\t\t}\r\n\t}\r\n\treturn { grid_1, grid_2, grid_tp };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tPRINCIPAL STRESS TRAJECTORIES\r\n-----------------------------------------------------------------------*/\r\nstd::tuple principal_stress_trajectories(double xfrom, double xto, double yfrom, double yto, dcvec xtraj, dcvec ytraj, int Ntraj, int lvs_traj, double sigma_11inf, dcvec z1a, dcvec z2a, dvec La, dvec mua, int 
ma, int na, ddcvec a)\r\n{\r\n\t// Defining the variables\r\n\tddcvec traj_1(lvs_traj * 2, dcvec(Ntraj));\r\n\tddcvec traj_2(lvs_traj * 2, dcvec(Ntraj));\r\n\tdouble dx, dy, dx_lvsre, dx_lvsim, dy_lvsre, dy_lvsim, pi_val;\r\n\tdouble cond;\r\n\tint NIT;\r\n\r\n\t// Getting the starting points\r\n\tdx = (xto - xfrom) / (Ntraj * 0.8);\r\n\tdy = (yto - yfrom) / (Ntraj * 0.8);\r\n\tdx_lvsre = (real(xtraj[1]) - real(xtraj[0])) / (lvs_traj - 1.0);\r\n\tdx_lvsim = (imag(xtraj[1]) - imag(xtraj[0])) / (lvs_traj - 1.0);\r\n\tdy_lvsre = (real(ytraj[1]) - real(ytraj[0])) / (lvs_traj - 1.0);\r\n\tdy_lvsim = (imag(ytraj[1]) - imag(ytraj[0])) / (lvs_traj - 1.0);\r\n\tpi_val = 0.5 * pi();\r\n\tcond = 1e-6;\r\n\tNIT = 10;\r\n\r\n\t// SIGMA 1\r\n\t#pragma omp parallel for default(none) shared(traj_1, sigma_11inf, z1a, z2a, La, mua, ma, na, a)\r\n\tfor (int ii = 0; ii < lvs_traj; ii++)\r\n\t{\r\n\t\ttraj_1[ii][0] = dcomp(real(xtraj[0]) + ii * dx_lvsre, imag(xtraj[0]) + ii * dx_lvsim);\r\n\t\tdcomp z = traj_1[ii][0];\r\n\t\tdcomp z_old = z;\r\n\t\tdouble dx1 = dx;\r\n\t\tfor (int jj = 1; jj < Ntraj; jj++)\r\n\t\t{\r\n\t\t\tdcomp zt, z11, z_oldt, sigma, sigmat;\r\n\t\t\tdouble sigma_1, theta_p;\r\n\t\t\tdouble ee, eta;\r\n\t\t\tstd::tie(sigma_1, std::ignore, theta_p) = principal_sigma(z, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t\tsigma = abs(sigma_1) * exp(dcomp(0, 1) * theta_p);\r\n\t\t\tz11 = z + sigma / abs(sigma) * dx1;\r\n\t\t\teta = angel_change(z11, z, z_old);\r\n\t\t\tif (eta > pi_val && jj > 1) {\r\n\t\t\t\tdx1 = -dx1;\r\n\t\t\t}\r\n\t\t\tzt = z + sigma / abs(sigma) * dx1;\r\n\r\n\t\t\tee = 1;\r\n\t\t\tfor (int rr = NIT; rr--;)\r\n\t\t\t{\r\n\t\t\t\tz_oldt = zt;\r\n\t\t\t\tstd::tie(sigma_1, std::ignore, theta_p) = principal_sigma(zt, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t\t\tsigmat = abs(sigma_1) * exp(dcomp(0, 1) * theta_p);\r\n\t\t\t\tz11 = z + (sigma + sigmat) / abs(sigma + sigmat) * dx1;\r\n\t\t\t\teta = angel_change(z11, z, z_old);\r\n\t\t\t\tif 
(eta > pi_val && jj > 1) {\r\n\t\t\t\t\tdx1 = -dx1;\r\n\t\t\t\t}\r\n\t\t\t\tzt = z + (sigma + sigmat) / abs(sigma + sigmat) * dx1;\r\n\t\t\t\tee = std::norm(z_oldt - zt);\r\n\t\t\t\tif (ee < cond)\r\n\t\t\t\t{\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\ttraj_1[ii][jj] = zt;\r\n\t\t\tz_old = z;\r\n\t\t\tz = zt;\r\n\t\t}\r\n\r\n\t\tint kk = ii + lvs_traj;\r\n\t\ttraj_1[kk][0] = traj_1[ii][0];\r\n\t\tz = traj_1[kk][0];\r\n\t\tz_old = z;\r\n\t\tdx1 = -dx;\r\n\t\tfor (int jj = 1; jj < Ntraj; jj++)\r\n\t\t{\r\n\t\t\tdcomp zt, z11, z_oldt, sigma, sigmat;\r\n\t\t\tdouble sigma_1, theta_p;\r\n\t\t\tdouble ee, eta;\r\n\t\t\tstd::tie(sigma_1, std::ignore, theta_p) = principal_sigma(z, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t\tsigma = abs(sigma_1) * exp(dcomp(0, 1) * theta_p);\r\n\t\t\tz11 = z + sigma / abs(sigma) * dx1;\r\n\t\t\teta = angel_change(z11, z, z_old);\r\n\t\t\tif (eta > pi_val && jj > 1) {\r\n\t\t\t\tdx1 = -dx1;\r\n\t\t\t}\r\n\t\t\tzt = z + sigma / abs(sigma) * dx1;\r\n\r\n\t\t\tee = 1;\r\n\t\t\tfor (int rr = NIT; rr--;)\r\n\t\t\t{\r\n\t\t\t\tz_oldt = zt;\r\n\t\t\t\tstd::tie(sigma_1, std::ignore, theta_p) = principal_sigma(zt, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t\t\tsigmat = abs(sigma_1) * exp(dcomp(0, 1) * theta_p);\r\n\t\t\t\tz11 = z + (sigma + sigmat) / abs(sigma + sigmat) * dx1;\r\n\t\t\t\teta = angel_change(z11, z, z_old);\r\n\t\t\t\tif (eta > pi_val && jj > 1) {\r\n\t\t\t\t\tdx1 = -dx1;\r\n\t\t\t\t}\r\n\t\t\t\tzt = z + (sigma + sigmat) / abs(sigma + sigmat) * dx1;\r\n\t\t\t\tee = std::norm(z_oldt - zt);\r\n\t\t\t\tif (ee < cond)\r\n\t\t\t\t{\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\ttraj_1[kk][jj] = zt;\r\n\t\t\tz_old = z;\r\n\t\t\tz = zt;\r\n\t\t}\r\n\t}\r\n\r\n\t// SIGMA 2\r\n\t#pragma omp parallel for default(none) shared(traj_2, sigma_11inf, z1a, z2a, La, mua, ma, na, a)\r\n\tfor (int ii = 0; ii < lvs_traj; ii++)\r\n\t{\r\n\t\ttraj_2[ii][0] = dcomp(real(ytraj[0]) + ii * dy_lvsre, imag(ytraj[0]) + ii * 
dy_lvsim);\r\n\t\tdouble dy1 = dy;\r\n\t\tdcomp z = traj_2[ii][0];\r\n\t\tdcomp z_old = z;\r\n\t\tfor (int jj = 1; jj < Ntraj; jj++)\r\n\t\t{\r\n\t\t\tdcomp zt, z11, z_oldt, sigma, sigmat;\r\n\t\t\tdouble sigma_2, theta_p;\r\n\t\t\tdouble ee, eta;\r\n\t\t\tstd::tie(std::ignore, sigma_2, theta_p) = principal_sigma(z, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t\tsigma = abs(sigma_2) * exp(dcomp(0, 1) * (theta_p + pi_val));\r\n\t\t\tz11 = z + sigma / abs(sigma) * dy1;\r\n\t\t\teta = angel_change(z11, z, z_old);\r\n\t\t\tif (eta > pi_val && jj > 1) {\r\n\t\t\t\tdy1 = -dy1;\r\n\t\t\t}\r\n\t\t\tzt = z + sigma / abs(sigma) * dy1;\r\n\r\n\t\t\tee = 1;\r\n\t\t\tfor (int rr = NIT; rr--;)\r\n\t\t\t{\r\n\t\t\t\tz_oldt = zt;\r\n\t\t\t\tstd::tie(std::ignore, sigma_2, theta_p) = principal_sigma(zt, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t\t\tsigmat = abs(sigma_2) * exp(dcomp(0, 1) * (theta_p + pi_val));\r\n\t\t\t\tz11 = z + (sigma + sigmat) / abs(sigma + sigmat) * dy1;\r\n\t\t\t\teta = angel_change(z11, z, z_old);\r\n\t\t\t\tif (eta > pi_val && jj > 1) {\r\n\t\t\t\t\tdy1 = -dy1;\r\n\t\t\t\t}\r\n\t\t\t\tzt = z + (sigma + sigmat) / abs(sigma + sigmat) * dy1;\r\n\t\t\t\tee = std::norm(z_oldt - zt);\r\n\t\t\t\tif (ee < cond)\r\n\t\t\t\t{\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\ttraj_2[ii][jj] = zt;\r\n\t\t\tz_old = traj_2[ii][(int)(jj - 1)];\r\n\t\t\tz = zt;\r\n\t\t}\r\n\r\n\t\tint kk = ii + lvs_traj;\r\n\t\ttraj_2[kk][0] = traj_2[ii][0];\r\n\t\tdy1 = -dy;\r\n\t\tz = traj_2[kk][0];\r\n\t\tz_old = z;\r\n\t\tfor (int jj = 1; jj < Ntraj; jj++)\r\n\t\t{\r\n\t\t\tdcomp zt, z11, z_oldt, sigma, sigmat;\r\n\t\t\tdouble sigma_2, theta_p;\r\n\t\t\tdouble ee, eta;\r\n\t\t\tstd::tie(std::ignore, sigma_2, theta_p) = principal_sigma(z, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t\tsigma = abs(sigma_2) * exp(dcomp(0, 1) * (theta_p + pi_val));\r\n\t\t\tz11 = z + sigma / abs(sigma) * dy1;\r\n\t\t\teta = angel_change(z11, z, z_old);\r\n\t\t\tif (eta > pi_val 
&& jj > 1) {\r\n\t\t\t\tdy1 = -dy1;\r\n\t\t\t}\r\n\t\t\tzt = z + sigma / abs(sigma) * dy1;\r\n\r\n\t\t\tee = 1;\r\n\t\t\tfor (int rr = NIT; rr--;)\r\n\t\t\t{\r\n\t\t\t\tz_oldt = zt;\r\n\t\t\t\tstd::tie(std::ignore, sigma_2, theta_p) = principal_sigma(zt, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t\t\tsigmat = abs(sigma_2) * exp(dcomp(0, 1) * (theta_p + pi_val));\r\n\t\t\t\tz11 = z + (sigma + sigmat) / abs(sigma) * dy1;\r\n\t\t\t\teta = angel_change(z11, z, z_old);\r\n\t\t\t\tif (eta > pi_val && jj > 1) {\r\n\t\t\t\t\tdy1 = -dy1;\r\n\t\t\t\t}\r\n\t\t\t\tzt = z + (sigma + sigmat) / abs(sigma + sigmat) * dy1;\r\n\t\t\t\tee = std::norm(z_oldt - zt);\r\n\t\t\t\tif (ee < cond)\r\n\t\t\t\t{\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\ttraj_2[kk][jj] = zt;\r\n\t\t\tz_old = z;\r\n\t\t\tz = zt;\r\n\t\t}\r\n\t}\r\n\treturn { traj_1, traj_2 };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tW UNIFORM STRESS\r\n-----------------------------------------------------------------------*/\r\ninline dcomp w_uni(dcomp z, double kappa, double G, double sigma_11inf)\r\n{\r\n\t// Defining the variables\r\n\tdcomp phi_bar, dphi, psi, w;\r\n\r\n\t// calculating the veriables\r\n\tphi_bar = -0.5 * sigma_11inf * conj(z);\r\n\tdphi = -0.5 * sigma_11inf;\r\n\tpsi = -0.5 * sigma_11inf * z;\r\n\r\n\t// Calculating w\r\n\tw = 1 / (4 * G) * ((z - conj(z)) * dphi + kappa * phi_bar + psi);\r\n\r\n\treturn { w };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tW CRACK - ANALYTIC ELEMENT\r\n-----------------------------------------------------------------------*/\r\ninline dcomp w_crack(dcomp z, double kappa, double G, dcomp z1, dcomp z2, double L, double mu, int m, dcvec a)\r\n{\r\n\t// Defining the variables\r\n\tdcomp chi, chi_bar, Z, chi_pow;\r\n\tdcomp dphi, phi_bar, psi;\r\n\tdcomp w, L_frac;\r\n\tdouble n;\r\n\r\n\t// Getting the chi - and Z - coordinates\r\n\tchi = chi_from_z(z, z1, z2, L, 
mu);\r\n\tchi_bar = conj(chi);\r\n\tZ = exp(dcomp(0, -1) * mu) * 2.0 * (z - 0.5 * (z1 + z2)) / L;\r\n\r\n\t// Calculating the series\r\n\tphi_bar = 0;\r\n\tdphi = 0;\r\n\tpsi = 0;\r\n\tn = 0;\r\n\tchi_pow = chi * chi - 1.0;\r\n\tfor (int ii = 0; ii < m; ii++)\r\n\t{\r\n\t\tdcomp a_n;\r\n\t\tn += 1;\r\n\t\ta_n = a[ii] * n;\r\n\t\tdphi += conj(a_n) * pow(chi, (1.0 - n)) / chi_pow;\r\n\t\tphi_bar -= a[ii] * pow(chi_bar, -n);\r\n\t\tpsi += a[ii] * pow(chi, -n);\r\n\t}\r\n\r\n\t// Multiplying the constants\r\n\tL_frac = (4.0 / L) * exp(dcomp(0, -1) * mu);\r\n\tdphi *= L_frac;\r\n\r\n\t// Calcualting w\r\n\tw = 1 / (4 * G) * (0.5 * L * (Z - conj(Z)) * exp(dcomp(0, 1) * mu) * dphi + kappa * phi_bar + psi);\r\n\r\n\treturn { w };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tW TOTAL\r\n-----------------------------------------------------------------------*/\r\ninline dcomp w_total(dcomp z, double kappa, double G, double sigma_11inf, dcvec z1a, dcvec z2a, dvec La, dvec mua, int ma, int na, ddcvec a)\r\n{\r\n\t// Defining the variables\r\n\tdcomp w, wg;\r\n\r\n\t// Add the unfirm stress field\r\n\tw = w_uni(z, kappa, G, sigma_11inf);\r\n\r\n\t// Add the anlytic element for a crack\r\n\tif (na > 0)\r\n\t{\r\n\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t{\r\n\t\t\tdcomp wc;\r\n\t\t\twc = w_crack(z, kappa, G, z1a[ii], z2a[ii], La[ii], mua[ii], ma, a[ii]);\r\n\t\t\tw += wc * exp(dcomp(0, -1) * mua[ii]);\r\n\t\t}\r\n\t}\r\n\r\n\treturn { w };\r\n}\r\n/* --------------------------------------------------------------------\r\n\t\tDISPLACEMENT FIELD\r\n-----------------------------------------------------------------------*/\r\nstd::tuple w_field(double xfrom, double xto, double yfrom, double yto, int Nw, double kappa, double G, double sigma_11inf, dcvec z1a, dcvec z2a, dvec La, dvec mua, int ma, int na, ddcvec a)\r\n\r\n{\r\n\t// Defining the variables\r\n\tddcvec grid_w(Nw, dcvec(Nw));\r\n\tdvec x_vecw(Nw), y_vecw(Nw);\r\n\tdouble 
dx;\r\n\tdouble dy;\r\n\r\n\t// Calcualte the displacement grid\r\n\tdx = (xto - xfrom) / (Nw - 1.0);\r\n\tdy = (yto - yfrom) / (Nw - 1.0);\r\n#pragma omp parallel for default(none) shared(grid_w, x_vecw, y_vecw, kappa, G, sigma_11inf, z1a, z2a, La, mua, ma, na, a)\r\n\tfor (int ii = 0; ii < Nw; ii++)\r\n\t{\r\n\t\tfor (int jj = 0; jj < Nw; jj++)\r\n\t\t{\r\n\r\n\t\t\tx_vecw[ii] = xfrom + ii * dx;\r\n\t\t\ty_vecw[jj] = yfrom + jj * dy;\r\n\t\t\tgrid_w[ii][jj] = w_total(dcomp(x_vecw[ii], y_vecw[jj]), kappa, G, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t}\r\n\t}\r\n\treturn { x_vecw, y_vecw, grid_w };\r\n}\r\n/*---------------------------------------------------------------------\r\n\t\tDISPLACEMENT TRAJECTORIES\r\n-----------------------------------------------------------------------*/\r\nddcvec w_trajectories(double xfrom, double xto, double yfrom, double yto, int Ntraj, int Nw, double kappa, double G, double sigma_11inf, dcvec z1a, dcvec z2a, dvec La, dvec mua, int ma, int na, ddcvec a)\r\n{\r\n\t// Defining the variables\r\n\tddcvec traj_w((int)(Nw * 2), dcvec(Ntraj));\r\n\tdouble dx, dy_lvs;\r\n\tdouble cond, pi_val;\r\n\tint NIT;\r\n\r\n\t// Getting the starting points\r\n\tdx = (xto - xfrom) / (Ntraj);\r\n\tdy_lvs = (yto - yfrom) / (Nw - 1.0);\r\n\tpi_val = 0.5 * pi();\r\n\tcond = 1e-6;\r\n\tNIT = 10;\r\n\r\n\t// w trajectories\r\n#pragma omp parallel for default(none) shared(traj_w, kappa, G, sigma_11inf, z1a, z2a, La, mua, ma, na, a)\r\n\tfor (int ii = 0; ii < Nw; ii++)\r\n\t{\r\n\t\ttraj_w[ii][0] = dcomp(xfrom, yfrom + ii * dy_lvs);\r\n\t\tdcomp z = traj_w[ii][0];\r\n\t\tdcomp z_old = z;\r\n\t\tdouble dx1 = dx;\r\n\t\tfor (int jj = 1; jj < Ntraj; jj++)\r\n\t\t{\r\n\t\t\tdcomp zt, z_oldt, w, w1;\r\n\t\t\tdouble ee;\r\n\t\t\tw = w_total(z, kappa, G, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t\tzt = z + conj(w) / abs(w) * dx1;\r\n\r\n\t\t\tee = 1;\r\n\t\t\tfor (int rr = NIT; rr--;)\r\n\t\t\t{\r\n\t\t\t\tz_oldt = zt;\r\n\t\t\t\tw1 = w_total(zt, 
kappa, G, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t\t\tzt = z + conj(w + w1) / abs(w + w1) * dx1;\r\n\t\t\t\tee = std::norm(z_oldt - zt);\r\n\t\t\t\tif (ee < cond)\r\n\t\t\t\t{\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\ttraj_w[ii][jj] = zt;\r\n\t\t\tz_old = z;\r\n\t\t\tz = zt;\r\n\t\t}\r\n\r\n\t\tint kk = ii + Nw;\r\n\t\ttraj_w[kk][0] = dcomp(xto, yfrom + ii * dy_lvs);\r\n\t\tz = traj_w[kk][0];\r\n\t\tz_old = z;\r\n\t\tdx1 = dx;\r\n\t\tfor (int jj = 1; jj < Ntraj; jj++)\r\n\t\t{\r\n\t\t\tdcomp zt, z_oldt, w, w1;\r\n\t\t\tdouble ee;\r\n\t\t\tw = w_total(z, kappa, G, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t\tzt = z + conj(w) / abs(w) * dx1;\r\n\r\n\t\t\tee = 1;\r\n\t\t\tfor (int rr = NIT; rr--;)\r\n\t\t\t{\r\n\t\t\t\tz_oldt = zt;\r\n\t\t\t\tw1 = w_total(zt, kappa, G, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\t\t\t\tzt = z + conj(w + w1) / abs(w + w1) * dx1;\r\n\t\t\t\tee = std::norm(z_oldt - zt);\r\n\t\t\t\tif (ee < cond)\r\n\t\t\t\t{\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\ttraj_w[kk][jj] = zt;\r\n\t\t\tz_old = z;\r\n\t\t\tz = zt;\r\n\r\n\t\t}\r\n\t}\r\n\treturn { traj_w };\r\n}\r\n\r\n/* --------------------------------------------------------------------\r\n\r\n\t\tMAIN SCRIPT\r\n\r\n-----------------------------------------------------------------------*/\r\nint main()\r\n{\r\n\t/* --------------------------------------------------------------------\r\n\t\t\tDefining variables and importing data\r\n\t-----------------------------------------------------------------------*/\r\n\r\n\t// Header in console window\r\n\tauto start = std::chrono::high_resolution_clock::now(); // Start the clock\r\n\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\tstd::cout << \"\t\tANALYTIC ELEMENT LINEAR ELASTIC SOLVER\t\" << std::endl << std::endl;\r\n\tstd::cout << \"=================================================================\" << std::endl << 
std::endl;\r\n\r\n\tauto date_time1 = std::chrono::system_clock::now();\r\n\tstd::time_t start_time = std::chrono::system_clock::to_time_t(date_time1);\r\n\tchar str_time1[26];\r\n\tctime_s(str_time1, sizeof str_time1, &start_time);\r\n\tstd::cout << \"Program started: \" << str_time1 << std::endl;\r\n\r\n\t// Setting the data types\r\n\tdcomp z;\r\n\tdouble kappa, sigma_11inf, G, cond;\r\n\tint na, ma, Na, NIT;\r\n\r\n\t// Read the input data from binary file PART 1\r\n\tstd::ifstream input_file(\"geometry_data.bin\", std::ios::in | std::ios::binary | std::ios::ate);\r\n\tstd::streampos size = input_file.tellg();\r\n\tchar* memblock = new char[size];\r\n\tinput_file.seekg(0, std::ios::beg);\r\n\tinput_file.read(memblock, size);\r\n\tdouble* fin = (double*)memblock;//reinterpret as doubles\r\n\r\n\tsigma_11inf = fin[0];\r\n\tkappa = fin[1];\r\n\tG = fin[2];\r\n\tna = (int)fin[3];\r\n\tma = (int)fin[4];\r\n\tNa = (int)fin[5];\r\n\tcond = (double)fin[6];\r\n\tNIT = (int)fin[7];\r\n\r\n\t// Declaring the vecotrs\r\n\tdcvec z1a(na), z2a(na);\r\n\tdvec pa(na), La(na), mua(na);\r\n\tddcvec a(na, dcvec(ma));\r\n\r\n\tint pos = 7 + 1;\r\n\tif (na > 0)\r\n\t{\r\n\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t{\r\n\t\t\tint re = pos + ii;\r\n\t\t\tint im = pos + na + ii;\r\n\t\t\tz1a[ii] = dcomp(fin[re], fin[im]);\r\n\t\t}\r\n\t\tpos += 2 * na;\r\n\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t{\r\n\t\t\tint re = pos + ii;\r\n\t\t\tint im = pos + na + ii;\r\n\t\t\tz2a[ii] = dcomp(fin[re], fin[im]);\r\n\t\t}\r\n\t\tpos += 2 * na;\r\n\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t{\r\n\t\t\tpa[ii] = fin[pos + ii];\r\n\t\t}\r\n\t\tpos += na;\r\n\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t{\r\n\t\t\tLa[ii] = fin[pos + ii];\r\n\t\t}\r\n\t\tpos += na;\r\n\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t{\r\n\t\t\tmua[ii] = fin[pos + ii];\r\n\t\t}\r\n\t\tpos += na;\r\n\t}\r\n\telse\r\n\t{\r\n\t\tz1a = { dcomp(0, 0) };\r\n\t\tz2a = { dcomp(0, 0) };\r\n\t\tLa = { 0 };\r\n\t\tmua = { 0 };\r\n\t\ta = { { 
dcomp(0,0) } };\r\n\t\tma = 1;\r\n\t\tNa = 1;\r\n\t}\r\n\r\n\t// Setting the data types\r\n\tdouble xfrom, xto, yfrom, yto;\r\n\tint Nx, Ny, Nw, Ntraj, lvs_traj;\r\n\tddvec grid_11, grid_22, grid_12, grid_1, grid_2, theta_p;\r\n\tddcvec traj_1, traj_2, grid_w, traj_w;\r\n\tdvec x_vec, y_vec, x_vecw, y_vecw;\r\n\tdcvec xtraj, ytraj;\r\n\r\n\t// Read the plot data from binary file\r\n\tstd::ifstream plot_file(\"plot_data.bin\", std::ios::in | std::ios::binary | std::ios::ate);\r\n\tstd::streampos size2 = plot_file.tellg();\r\n\tchar* memblock2 = new char[size2];\r\n\tplot_file.seekg(0, std::ios::beg);\r\n\tplot_file.read(memblock2, size2);\r\n\tdouble* fplot = (double*)memblock2;//reinterpret as doubles\r\n\r\n\txfrom = fplot[0];\r\n\txto = fplot[1];\r\n\tyfrom = fplot[2];\r\n\tyto = fplot[3];\r\n\tNx = (int)fplot[4];\r\n\tNy = (int)fplot[5];\r\n\tNw = (int)fplot[6];\r\n\tNtraj = (int)fplot[7];\r\n\tlvs_traj = (int)fplot[8];\r\n\txtraj = { fplot[9] + dcomp(0,1) * fplot[10], fplot[11] + dcomp(0,1) * fplot[12] };\r\n\tytraj = { fplot[13] + dcomp(0,1) * fplot[14], fplot[15] + dcomp(0,1) * fplot[16] };\r\n\r\n\t// Displying the plot data in the command window\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\tstd::cout << \"\t\tTHE GEOMETRY DATA\t\" << std::endl << std::endl;\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\tstd::cout << \"This is the retrived geometry data:\\n\";\r\n\tstd::cout << \"sigma_11inf = \" << sigma_11inf << std::endl;\r\n\tstd::cout << \" kappa = \" << kappa << std::endl;\r\n\tstd::cout << \" G = \" << G << std::endl;\r\n\tstd::cout << \" na = \" << na << std::endl;\r\n\tstd::cout << \" ma = \" << ma << std::endl;\r\n\tstd::cout << \" Na = \" << Na << std::endl;\r\n\t// Displying the plot data in the command window\r\n\tstd::cout << \"=================================================================\" << 
std::endl << std::endl;\r\n\tstd::cout << \"\t\tTHE READ PLOT DATA\t\" << std::endl << std::endl;\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\tstd::cout << \"This is the retrived plot data:\\n\";\r\n\tstd::cout << \"x from: \" << xfrom << \" to \" << xto << std::endl;\r\n\tstd::cout << \"y from: \" << yfrom << \" to \" << yto << std::endl;\r\n\tstd::cout << \"x resolution: \" << Nx << std::endl;\r\n\tstd::cout << \"y resolution: \" << Ny << std::endl;\r\n\tstd::cout << \"Total number of points: \" << Nx * Ny << std::endl;\r\n\tstd::cout << \"Number of steps in trajectories: \" << Ntraj << std::endl;\r\n\tstd::cout << \"Number of trajectory levels: \" << lvs_traj << std::endl;\r\n\tstd::cout << \"Total number of trajectory points: \" << Ntraj * lvs_traj * 4 << std::endl;\r\n\tstd::cout << \"sigma_1 starting line from: \" << xtraj[0] << \" to \" << xtraj[1] << std::endl;\r\n\tstd::cout << \"sigma_2 starting line from: \" << ytraj[0] << \" to \" << ytraj[1] << std::endl;\r\n\tstd::cout << \"x and y quiver resolution: \" << Nw << std::endl << std::endl;\r\n\tstd::cout << std::endl;\r\n\r\n\t/* --------------------------------------------------------------------\r\n\t\t\tSolve the system\r\n\t-----------------------------------------------------------------------*/\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\tstd::cout << \"\t\tINITIALIZING THE SOVLER\t\" << std::endl << std::endl;\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\r\n\tauto start_solv = std::chrono::high_resolution_clock::now();\r\n\ta = iterator(cond, NIT, Na, pa, sigma_11inf, z1a, z2a, La, mua, ma, na);\r\n\tauto stop_solv = std::chrono::high_resolution_clock::now();\r\n\tauto duration_solv = std::chrono::duration_cast(stop_solv - start_solv);\r\n\r\n\t// Displaying the computation 
time\r\n\tstd::cout << std::endl;\r\n\tlong long ms_solv = duration_solv.count();\r\n\tlong long s_solv, m_solv, h_solv;\r\n\tstd::tie(ms_solv, s_solv, m_solv, h_solv) = ms_to_time(ms_solv);\r\n\tstd::cout << \"Computations finnished after \";\r\n\ttime_print(ms_solv, s_solv, m_solv, h_solv);\r\n\r\n\tstd::cout << std::endl;\r\n\r\n\t/* --------------------------------------------------------------------\r\n\t\t\tChecking the error\r\n\t-----------------------------------------------------------------------*/\r\n\tdouble error_med_a_re{}, error_mean_a_re{}, error_max_a_re{}, error_med_a_im{}, error_mean_a_im{}, error_max_a_im{}, error_med_int{}, error_mean_int{}, error_max_int{};\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\tstd::cout << \"\t\tERRORS\t\" << std::endl << std::endl;\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\tint numa;\r\n\tnuma = (int)round(Na);\r\n\tdcvec T_check(numa* na);\r\n\tif (na > 0)\r\n\t{\r\n\t\t// Calculate the error along the cracks, i.e. 
T = t_s + i*t_n = 0 + i*p\r\n\t\tdvec T_check_a_re(numa * na), T_check_a_im(numa * na);\r\n\t\tdouble theta_0a = pi()/numa;\r\n\t\tdouble delthetaa = (pi() - 2.0*theta_0a) / (numa-1.0);\r\n\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t{\r\n\t\t\tfor (int jj = 0; jj < numa; jj++)\r\n\t\t\t{\r\n\t\t\t\tdcomp z, tau_11, tau_12;\r\n\t\t\t\tdouble theta;\r\n\t\t\t\ttheta = theta_0a + (jj) * delthetaa;\r\n\t\t\t\tz = z_from_chi(exp(dcomp(0, 1) * theta), z1a[ii], z2a[ii], La[ii], mua[ii]);\r\n\t\t\t\tT_check[(ii * numa) + jj] = T_total(z, sigma_11inf, z1a, z2a, La, mua, ma, na, a, -1, ii) + dcomp(0, pa[ii]);\r\n\t\t\t\tT_check_a_re[(ii * numa) + jj] = abs(real(T_check[(ii * numa) + jj]));\r\n\t\t\t\tT_check_a_im[(ii * numa) + jj] = abs(imag(T_check[(ii * numa) + jj]));\r\n\t\t\t}\r\n\t\t}\r\n\t\t// Median\r\n\t\tsize_t n1 = T_check_a_re.size() / 2;\r\n\t\tnth_element(T_check_a_re.begin(), T_check_a_re.begin() + n1, T_check_a_re.end());\r\n\t\terror_med_a_re = T_check_a_re[n1];\r\n\t\tnth_element(T_check_a_im.begin(), T_check_a_im.begin() + n1, T_check_a_im.end());\r\n\t\terror_med_a_im = T_check_a_im[n1];\r\n\t\t// Mean\r\n\t\terror_mean_a_re = 1.0 * std::accumulate(T_check_a_re.begin(), T_check_a_re.end(), 0.0) / (n1 * 2);\r\n\t\terror_mean_a_im = 1.0 * std::accumulate(T_check_a_im.begin(), T_check_a_im.end(), 0.0) / (n1 * 2);\r\n\t\t// Max\r\n\t\terror_max_a_re = *max_element(T_check_a_re.begin(), T_check_a_re.end());\r\n\t\terror_max_a_im = *max_element(T_check_a_im.begin(), T_check_a_im.end());\r\n\t\t// Print the results\r\n\t\tstd::cout << \" Analytic Element for a Crack\" << std::endl;\r\n\t\tstd::cout << \" Difference for ts\" << std::endl;\r\n\t\tstd::cout << \" Maximum = \" << error_max_a_re << std::endl;\r\n\t\tstd::cout << \" Mean = \" << error_mean_a_re << std::endl;\r\n\t\tstd::cout << \" Median = \" << error_med_a_re << std::endl;\r\n\t\tstd::cout << \" Difference for tn\" << std::endl;\r\n\t\tstd::cout << \" Maximum = \" << error_max_a_im << 
std::endl;\r\n\t\tstd::cout << \" Mean = \" << error_mean_a_im << std::endl;\r\n\t\tstd::cout << \" Median = \" << error_med_a_im << std::endl;\r\n\r\n\t\t// Error at the intersection\r\n\t\tddcvec zint(na, dcvec(na));\r\n\t\tiivec int_check(na, ivec(na));\r\n\t\tivec int_count(na);\r\n\r\n\t\t// Find the intersection points\r\n\t\t#pragma omp parallel for default(none) shared(zint, int_check, int_count, z1a, z2a)\r\n\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t{\r\n\t\t\tfor (int jj = 0; jj < na; jj++)\r\n\t\t\t{\r\n\t\t\t\tstd::tie(zint[ii][jj], int_check[ii][jj]) = intersection_point(z1a[ii], z2a[ii], z1a[jj], z2a[jj]);\r\n\t\t\t}\r\n\t\t\tint_count[ii] = std::accumulate(int_check[ii].begin(), int_check[ii].end(), 0.0);\r\n\t\t}\r\n\t\tint int_sum = std::accumulate(int_count.begin(), int_count.end(), 0.0);\r\n\t\tdvec int_error(int_sum);\r\n\t\tint cnt_int = 0;\r\n\r\n\t\t// Calcualte the error at the intersections, i.e. real(tau^11) = 0\r\n\t\tif (int_sum > 0)\r\n\t\t{\r\n\t\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t\t{\r\n\t\t\t\tif (int_count[ii] > 0)\r\n\t\t\t\t{\r\n\t\t\t\t\tint cnt_er = 0;\r\n\t\t\t\t\tfor (int jj = 0; jj < na; jj++)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tif (int_check[ii][jj] == 1)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tdcomp tau_11, tau_12, T;\r\n\t\t\t\t\t\t\tstd::tie(tau_11, tau_12) = tau_total(zint[ii][jj], sigma_11inf, z1a, z2a, La, mua, ma, na, a, -1);\r\n\t\t\t\t\t\t\tT = T_total(z, sigma_11inf, z1a, z2a, La, mua, ma, na, a, -1, ii);\r\n\t\t\t\t\t\t\tstd::cout << \"z = \" << zint[ii][jj] << \" tau_11 = \" << tau_11 << \" tau_12 = \" << tau_12 << \" T = \" << T << std::endl;\r\n\t\t\t\t\t\t\tint_error[cnt_int + cnt_er] = abs(real(tau_11));\r\n\t\t\t\t\t\t\tcnt_er += 1;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t\tcnt_int += int_count[ii];\r\n\t\t\t}\r\n\r\n\t\t\t// Mean\r\n\t\t\terror_mean_int = std::accumulate(int_error.begin(), int_error.end(), 0.0) / (int_sum);\r\n\t\t\t// Max\r\n\t\t\terror_max_int = *max_element(int_error.begin(), 
int_error.end());\r\n\t\t\t// Median\r\n\t\t\tsize_t n2 = int_error.size() / 2;\r\n\t\t\tnth_element(int_error.begin(), int_error.begin() + n2, int_error.end());\r\n\t\t\terror_med_int = int_error[n2];\r\n\t\t\t// Print the results\r\n\t\t\tstd::cout << \" Intersection/s:\" << std::endl;\r\n\t\t\tstd::cout << \" Maximum = \" << error_max_int << std::endl;\r\n\t\t\tstd::cout << \" Mean = \" << error_mean_int << std::endl;\r\n\t\t\tstd::cout << \" Median = \" << error_med_int << std::endl;\r\n\t\t}\r\n\t}\r\n\r\n\t// Create/open the two output files\r\n\tstd::ofstream outfile_coef(\"input_data.bin\", std::ios::out | std::ios::binary);\r\n\r\n\t// Save the plot properties\r\n\tdvec prop = { sigma_11inf, kappa, G, 1.0 * na, 1.0 * ma};\r\n\tconst char* pointerprop = reinterpret_cast(&prop[0]);\r\n\tstd::size_t bytesprop = prop.size() * sizeof(prop[0]);\r\n\toutfile_coef.write(pointerprop, bytesprop);\r\n\r\n\r\n\t// saving the coordinates of the cracks\r\n\tif (na > 0)\r\n\t{\r\n\t\tdvec fz1_re(na);\r\n\t\tdvec fz1_im(na);\r\n\t\tfor (int jj = 0; jj < na; jj++)\r\n\t\t{\r\n\t\t\tfz1_re[jj] = real(z1a[jj]);\r\n\t\t\tfz1_im[jj] = imag(z1a[jj]);\r\n\t\t}\r\n\t\tconst char* pointerz1_re = reinterpret_cast(&fz1_re[0]);\r\n\t\tstd::size_t bytesz1_re = fz1_re.size() * sizeof(fz1_re[0]);\r\n\t\toutfile_coef.write(pointerz1_re, bytesz1_re);\r\n\t\tconst char* pointerz1_im = reinterpret_cast(&fz1_im[0]);\r\n\t\tstd::size_t bytesz1_im = fz1_im.size() * sizeof(fz1_im[0]);\r\n\t\toutfile_coef.write(pointerz1_im, bytesz1_im);\r\n\r\n\t\tdvec fz2_re(na);\r\n\t\tdvec fz2_im(na);\r\n\t\tfor (int jj = 0; jj < na; jj++)\r\n\t\t{\r\n\t\t\tfz2_re[jj] = real(z2a[jj]);\r\n\t\t\tfz2_im[jj] = imag(z2a[jj]);\r\n\t\t}\r\n\t\tconst char* pointerz2_re = reinterpret_cast(&fz2_re[0]);\r\n\t\tstd::size_t bytesz2_re = fz2_re.size() * sizeof(fz2_re[0]);\r\n\t\toutfile_coef.write(pointerz2_re, bytesz2_re);\r\n\t\tconst char* pointerz2_im = reinterpret_cast(&fz2_im[0]);\r\n\t\tstd::size_t bytesz2_im = 
fz2_im.size() * sizeof(fz2_im[0]);\r\n\t\toutfile_coef.write(pointerz2_im, bytesz2_im);\r\n\r\n\t\tdvec fL = La;\r\n\t\tconst char* pointerL = reinterpret_cast(&fL[0]);\r\n\t\tstd::size_t bytesL = fL.size() * sizeof(fL[0]);\r\n\t\toutfile_coef.write(pointerL, bytesL);\r\n\r\n\t\tdvec fmu = mua;\r\n\t\tconst char* pointermu = reinterpret_cast(&fmu[0]);\r\n\t\tstd::size_t bytesmu = fmu.size() * sizeof(fmu[0]);\r\n\t\toutfile_coef.write(pointermu, bytesmu);\r\n\r\n\t\t// Save the a\r\n\t\tfor (int ii = 0; ii < na; ii++)\r\n\t\t{\r\n\t\t\tdvec fbeta_re(ma);\r\n\t\t\tdvec fbeta_im(ma);\r\n\r\n\t\t\tfor (int jj = 0; jj < ma; jj++)\r\n\t\t\t{\r\n\t\t\t\tfbeta_re[jj] = real(a[ii][jj]);\r\n\t\t\t\tfbeta_im[jj] = imag(a[ii][jj]);\r\n\t\t\t}\r\n\t\t\tconst char* pointerbeta_re = reinterpret_cast(&fbeta_re[0]);\r\n\t\t\tstd::size_t bytesbeta_re = fbeta_re.size() * sizeof(fbeta_re[0]);\r\n\t\t\toutfile_coef.write(pointerbeta_re, bytesbeta_re);\r\n\t\t\tconst char* pointerbeta_im = reinterpret_cast(&fbeta_im[0]);\r\n\t\t\tstd::size_t bytesbeta_im = fbeta_im.size() * sizeof(fbeta_im[0]);\r\n\t\t\toutfile_coef.write(pointerbeta_im, bytesbeta_im);\r\n\t\t}\r\n\t}\r\n\r\n\t// Estimate the time of the program\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\tstd::cout << \"\t\tESTIMATED CALCULATION TIME\t\" << std::endl << std::endl;\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\tint Ne = (Nx + Ny + Ntraj) / 100;\r\n\tif (Ne < 8)\r\n\t{\r\n\t\tNe = 8;\r\n\t}\r\n\tauto start0 = std::chrono::high_resolution_clock::now();\r\n\tstress_field(xfrom, xto, yfrom, yto, Ne, Ne, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\tauto stop0 = std::chrono::high_resolution_clock::now();\r\n\tauto start01 = std::chrono::high_resolution_clock::now();\r\n\tprincipal_stress_trajectories(xfrom, xto, yfrom, yto, xtraj, ytraj, Ne, Ne, sigma_11inf, z1a, z2a, La, mua, 
ma, na, a);\r\n\tauto stop01 = std::chrono::high_resolution_clock::now();\r\n\tauto duration0 = std::chrono::duration_cast(stop0 - start0);\r\n\tlong long ms0 = duration0.count();\r\n\tauto duration01 = std::chrono::duration_cast(stop01 - start01);\r\n\tlong long ms01 = duration01.count();\r\n\tlong long calcs0 = (int)(2 * Nx * Ny) + (int)(Nw * Nw);\r\n\tlong long calcs01 = (int)(Ntraj * lvs_traj) + (int)(Ntraj * Nw);\r\n\tms0 = ((ms0 / ((int)Ne * Ne) * calcs0 + ms01 / ((int)Ne * Ne) * calcs01))*0.7;\r\n\tlong long s0, m0, h0;\r\n\tstd::tie(ms0, s0, m0, h0) = ms_to_time(ms0);\r\n\tstd::cout << \"Estimated calculation time: \";\r\n\ttime_print(ms0, s0, m0, h0);\r\n\tstd::cout << std::endl << std::endl;\r\n\tauto date_time2 = std::chrono::system_clock::now();\r\n\tstd::time_t start_time2 = std::chrono::system_clock::to_time_t(date_time2);\r\n\tchar str_time2[26];\r\n\tctime_s(str_time2, sizeof str_time2, &start_time2);\r\n\tstd::cout << \"Plotting started: \" << str_time2 << std::endl << std::endl;\r\n\r\n\t/* --------------------------------------------------------------------\r\n\t\t\tCalculating the plots\r\n\t-----------------------------------------------------------------------*/\r\n\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\tstd::cout << \"\t\tCOMPUTING THE PLOTS\t\" << std::endl << std::endl;\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\r\n\t// Get the Cartisian stress field\r\n\tstd::cout << \"Initiating the stress field calculation\\n\";\r\n\tauto start1 = std::chrono::high_resolution_clock::now();\r\n\tstd::tie(x_vec, y_vec, grid_11, grid_22, grid_12) = stress_field(xfrom, xto, yfrom, yto, Nx, Ny, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\tauto stop1 = std::chrono::high_resolution_clock::now();\r\n\tauto duration1 = std::chrono::duration_cast(stop1 - start1);\r\n\tstd::cout << \"Completed, time taken 
by function: stress_field = \";\r\n\tlong long ms1, s1, m1, h1;\r\n\tms1 = duration1.count();\r\n\tstd::tie(ms1, s1, m1, h1) = ms_to_time(ms1);\r\n\ttime_print(ms1, s1, m1, h1);\r\n\tstd::cout << std::endl << std::endl;\r\n\r\n\t// Get the principal stress field\r\n\tstd::cout << \"Initiating the principal stress field calculation\\n\";\r\n\tauto start2 = std::chrono::high_resolution_clock::now();\r\n\tstd::tie(grid_1, grid_2, theta_p) = principal_stress_field(Nx, Ny, grid_11, grid_22, grid_12);\r\n\tauto stop2 = std::chrono::high_resolution_clock::now();\r\n\tauto duration2 = std::chrono::duration_cast(stop2 - start2);\r\n\tstd::cout << \"Completed, time taken by function: principal_stress_field = \";\r\n\tms1 = duration2.count();\r\n\tstd::tie(ms1, s1, m1, h1) = ms_to_time(ms1);\r\n\ttime_print(ms1, s1, m1, h1);\r\n\tstd::cout << std::endl << std::endl;\r\n\r\n\t// Get the stress trajectories\r\n\tstd::cout << \"Initiating the principal stress trajectories calculation\\n\";\r\n\tauto start3 = std::chrono::high_resolution_clock::now();\r\n\tstd::tie(traj_1, traj_2) = principal_stress_trajectories(xfrom, xto, yfrom, yto, xtraj, ytraj, Ntraj, lvs_traj, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\tauto stop3 = std::chrono::high_resolution_clock::now();\r\n\tauto duration3 = std::chrono::duration_cast(stop3 - start3);\r\n\tstd::cout << \"Completed, time taken by function: principal_stress_trajectories = \";\r\n\tms1 = duration3.count();\r\n\tstd::tie(ms1, s1, m1, h1) = ms_to_time(ms1);\r\n\ttime_print(ms1, s1, m1, h1);\r\n\tstd::cout << std::endl << std::endl;\r\n\r\n\t// Get the displacement field\r\n\tstd::cout << \"Initiating the displacement field calculation\\n\";\r\n\tauto start4 = std::chrono::high_resolution_clock::now();\r\n\tstd::tie(x_vecw, y_vecw, grid_w) = w_field(xfrom, xto, yfrom, yto, Nw, kappa, G, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\tauto stop4 = std::chrono::high_resolution_clock::now();\r\n\tauto duration4 = 
std::chrono::duration_cast(stop4 - start4);\r\n\tstd::cout << \"Completed, time taken by function: w_field = \";\r\n\tms1 = duration4.count();\r\n\tstd::tie(ms1, s1, m1, h1) = ms_to_time(ms1);\r\n\ttime_print(ms1, s1, m1, h1);\r\n\tstd::cout << std::endl << std::endl;\r\n\r\n\r\n\t// Get the displacement trajectories\r\n\tstd::cout << \"Initiating the displacement trajectories calculation\\n\";\r\n\tauto start5 = std::chrono::high_resolution_clock::now();\r\n\ttraj_w = w_trajectories(xfrom, xto, yfrom, yto, Ntraj, Nw, kappa, G, sigma_11inf, z1a, z2a, La, mua, ma, na, a);\r\n\tauto stop5 = std::chrono::high_resolution_clock::now();\r\n\tauto duration5 = std::chrono::duration_cast(stop5 - start5);\r\n\tstd::cout << \"Completed, time taken by function: w_trajectories = \";\r\n\tms1 = duration5.count();\r\n\tstd::tie(ms1, s1, m1, h1) = ms_to_time(ms1);\r\n\ttime_print(ms1, s1, m1, h1);\r\n\tstd::cout << std::endl << std::endl;\r\n\r\n\t// Displaying the computation time\r\n\tstd::cout << \"=================================================================\" << std::endl << std::endl;\r\n\tlong long mstime = duration1.count() + duration2.count() + duration3.count() + duration4.count() + duration5.count();\r\n\tlong long stime, mtime, htime;\r\n\tstd::tie(mstime, stime, mtime, htime) = ms_to_time(mstime);\r\n\tstd::cout << \"Total calculation time: \";\r\n\ttime_print(mstime, stime, mtime, htime);\r\n\tstd::cout << std::endl << std::endl;\r\n\r\n\t/* --------------------------------------------------------------------\r\n\t\t\tSaving the data as binary files\r\n\t-----------------------------------------------------------------------*/\r\n\r\n\t// Create/open the two output files\r\n\tstd::ofstream outfile(\"data.bin\", std::ios::out | std::ios::binary);\r\n\tstd::ofstream outfiledim(\"dim_data.bin\", std::ios::out | std::ios::binary);\r\n\r\n\t// Save the x and y vectors\r\n\tdvec fx = x_vec;\r\n\tconst char* pointerx = reinterpret_cast(&fx[0]);\r\n\tstd::size_t bytesx = 
fx.size() * sizeof(fx[0]);\r\n\toutfile.write(pointerx, bytesx);\r\n\r\n\tdvec fy = y_vec;\r\n\tconst char* pointery = reinterpret_cast(&fy[0]);\r\n\tstd::size_t bytesy = fy.size() * sizeof(fy[0]);\r\n\toutfile.write(pointery, bytesy);\r\n\r\n\tdvec fxw = x_vecw;\r\n\tconst char* pointerxw = reinterpret_cast(&fxw[0]);\r\n\tstd::size_t bytesxw = fxw.size() * sizeof(fxw[0]);\r\n\toutfile.write(pointerxw, bytesxw);\r\n\r\n\tdvec fyw = y_vecw;\r\n\tconst char* pointeryw = reinterpret_cast(&fyw[0]);\r\n\tstd::size_t bytesyw = fyw.size() * sizeof(fyw[0]);\r\n\toutfile.write(pointeryw, bytesyw);\r\n\r\n\t// Save the grids\r\n\tfor (size_t ii = 0; ii < grid_11.size(); ii++)\r\n\t{\r\n\t\tdvec fg11 = grid_11[ii];\r\n\t\tconst char* pointerg11 = reinterpret_cast(&fg11[0]);\r\n\t\tstd::size_t bytesg11 = fg11.size() * sizeof(fg11[0]);\r\n\t\toutfile.write(pointerg11, bytesg11);\r\n\t}\r\n\tfor (size_t ii = 0; ii < grid_22.size(); ii++)\r\n\t{\r\n\t\tdvec fg22 = grid_22[ii];\r\n\t\tconst char* pointerg22 = reinterpret_cast(&fg22[0]);\r\n\t\tstd::size_t bytesg22 = fg22.size() * sizeof(fg22[0]);\r\n\t\toutfile.write(pointerg22, bytesg22);\r\n\t}\r\n\tfor (size_t ii = 0; ii < grid_12.size(); ii++)\r\n\t{\r\n\t\tdvec fg12 = grid_12[ii];\r\n\t\tconst char* pointerg12 = reinterpret_cast(&fg12[0]);\r\n\t\tstd::size_t bytesg12 = fg12.size() * sizeof(fg12[0]);\r\n\t\toutfile.write(pointerg12, bytesg12);\r\n\t}\r\n\tfor (size_t ii = 0; ii < grid_1.size(); ii++)\r\n\t{\r\n\t\tdvec fg1 = grid_1[ii];\r\n\t\tconst char* pointerg1 = reinterpret_cast(&fg1[0]);\r\n\t\tstd::size_t bytesg1 = fg1.size() * sizeof(fg1[0]);\r\n\t\toutfile.write(pointerg1, bytesg1);\r\n\t}\r\n\tfor (size_t ii = 0; ii < grid_2.size(); ii++)\r\n\t{\r\n\t\tdvec fg2 = grid_2[ii];\r\n\t\tconst char* pointerg2 = reinterpret_cast(&fg2[0]);\r\n\t\tstd::size_t bytesg2 = fg2.size() * sizeof(fg2[0]);\r\n\t\toutfile.write(pointerg2, bytesg2);\r\n\t}\r\n\tfor (size_t ii = 0; ii < theta_p.size(); ii++)\r\n\t{\r\n\t\tdvec fgtp = 
theta_p[ii];\r\n\t\tconst char* pointergtp = reinterpret_cast(&fgtp[0]);\r\n\t\tstd::size_t bytesgtp = fgtp.size() * sizeof(fgtp[0]);\r\n\t\toutfile.write(pointergtp, bytesgtp);\r\n\t}\r\n\tfor (size_t ii = 0; ii < traj_1.size(); ii++)\r\n\t{\r\n\t\tdvec fgt1_re(Ntraj);\r\n\t\tdvec fgt1_im(Ntraj);\r\n\r\n\t\tfor (size_t jj = 0; jj < traj_1[0].size(); jj++)\r\n\t\t{\r\n\t\t\tfgt1_re[jj] = real(traj_1[ii][jj]);\r\n\t\t\tfgt1_im[jj] = imag(traj_1[ii][jj]);\r\n\t\t}\r\n\t\tconst char* pointergt1_re = reinterpret_cast(&fgt1_re[0]);\r\n\t\tstd::size_t bytesgt1_re = fgt1_re.size() * sizeof(fgt1_re[0]);\r\n\t\toutfile.write(pointergt1_re, bytesgt1_re);\r\n\t\tconst char* pointergt1_im = reinterpret_cast(&fgt1_im[0]);\r\n\t\tstd::size_t bytesgt1_im = fgt1_im.size() * sizeof(fgt1_im[0]);\r\n\t\toutfile.write(pointergt1_im, bytesgt1_im);\r\n\t}\r\n\tfor (size_t ii = 0; ii < traj_2.size(); ii++)\r\n\t{\r\n\t\tdvec fgt2_re(Ntraj);\r\n\t\tdvec fgt2_im(Ntraj);\r\n\t\tfor (size_t jj = 0; jj < traj_2[0].size(); jj++)\r\n\t\t{\r\n\t\t\tfgt2_re[jj] = real(traj_2[ii][jj]);\r\n\t\t\tfgt2_im[jj] = imag(traj_2[ii][jj]);\r\n\t\t}\r\n\t\tconst char* pointergt2_re = reinterpret_cast(&fgt2_re[0]);\r\n\t\tstd::size_t bytesgt2_re = fgt2_re.size() * sizeof(fgt2_re[0]);\r\n\t\toutfile.write(pointergt2_re, bytesgt2_re);\r\n\t\tconst char* pointergt2_im = reinterpret_cast(&fgt2_im[0]);\r\n\t\tstd::size_t bytesgt2_im = fgt2_im.size() * sizeof(fgt2_im[0]);\r\n\t\toutfile.write(pointergt2_im, bytesgt2_im);\r\n\t}\r\n\tfor (size_t ii = 0; ii < grid_w.size(); ii++)\r\n\t{\r\n\t\tdvec fgw_re(Nw);\r\n\t\tdvec fgw_im(Nw);\r\n\t\tfor (size_t jj = 0; jj < grid_w[0].size(); jj++)\r\n\t\t{\r\n\t\t\tfgw_re[jj] = real(grid_w[ii][jj]);\r\n\t\t\tfgw_im[jj] = imag(grid_w[ii][jj]);\r\n\t\t}\r\n\t\tconst char* pointergw_re = reinterpret_cast(&fgw_re[0]);\r\n\t\tstd::size_t bytesgw_re = fgw_re.size() * sizeof(fgw_re[0]);\r\n\t\toutfile.write(pointergw_re, bytesgw_re);\r\n\t\tconst char* pointergw_im = 
reinterpret_cast(&fgw_im[0]);\r\n\t\tstd::size_t bytesgw_im = fgw_im.size() * sizeof(fgw_im[0]);\r\n\t\toutfile.write(pointergw_im, bytesgw_im);\r\n\t}\r\n\tfor (size_t ii = 0; ii < traj_w.size(); ii++)\r\n\t{\r\n\t\tdvec fgtw_re(Ntraj);\r\n\t\tdvec fgtw_im(Ntraj);\r\n\t\tfor (size_t jj = 0; jj < traj_w[0].size(); jj++)\r\n\t\t{\r\n\t\t\tfgtw_re[jj] = real(traj_w[ii][jj]);\r\n\t\t\tfgtw_im[jj] = imag(traj_w[ii][jj]);\r\n\t\t}\r\n\t\tconst char* pointergtw_re = reinterpret_cast(&fgtw_re[0]);\r\n\t\tstd::size_t bytesgtw_re = fgtw_re.size() * sizeof(fgtw_re[0]);\r\n\t\toutfile.write(pointergtw_re, bytesgtw_re);\r\n\t\tconst char* pointergtw_im = reinterpret_cast(&fgtw_im[0]);\r\n\t\tstd::size_t bytesgtw_im = fgtw_im.size() * sizeof(fgtw_im[0]);\r\n\t\toutfile.write(pointergtw_im, bytesgtw_im);\r\n\t}\r\n\tif (na > 0)\r\n\t{\r\n\t\tdvec fT_res(numa * na);\r\n\t\tdvec fT_ims(numa * na);\r\n\t\tfor (int jj = 0; jj < numa * na; jj++)\r\n\t\t{\r\n\t\t\tfT_res[jj] = real(T_check[jj]);\r\n\t\t\tfT_ims[jj] = imag(T_check[jj]);\r\n\t\t}\r\n\t\tconst char* pointerT_res = reinterpret_cast(&fT_res[0]);\r\n\t\tstd::size_t bytesT_res = fT_res.size() * sizeof(fT_res[0]);\r\n\t\toutfile.write(pointerT_res, bytesT_res);\r\n\t\tconst char* pointerT_ims = reinterpret_cast(&fT_ims[0]);\r\n\t\tstd::size_t bytesT_ims = fT_ims.size() * sizeof(fT_ims[0]);\r\n\t\toutfile.write(pointerT_ims, bytesT_ims);\r\n\t}\r\n\t\r\n\r\n\t// Save the plot properties\r\n\tdvec dim = { 1.0 * Nx, 1.0 * Ny, 1.0 * Nw, 1.0 * Ntraj, 1.0 * lvs_traj, 1.0 * na, error_med_a_re, error_mean_a_re, error_max_a_re, error_med_a_im, error_mean_a_im, error_max_a_im, error_med_int, error_mean_int, error_max_int};\r\n\tconst char* pointerdim = reinterpret_cast(&dim[0]);\r\n\tstd::size_t bytesdim = dim.size() * sizeof(dim[0]);\r\n\toutfiledim.write(pointerdim, bytesdim);\r\n\r\n\t// saving the coordinates of the cracks\r\n\tif (na > 0)\r\n\t{\r\n\t\tdvec fz1_res(na);\r\n\t\tdvec fz1_ims(na);\r\n\t\tfor (int jj = 0; jj < na; 
jj++)\r\n\t\t{\r\n\t\t\tfz1_res[jj] = real(z1a[jj]);\r\n\t\t\tfz1_ims[jj] = imag(z1a[jj]);\r\n\t\t}\r\n\t\tconst char* pointerz1_res = reinterpret_cast(&fz1_res[0]);\r\n\t\tstd::size_t bytesz1_res = fz1_res.size() * sizeof(fz1_res[0]);\r\n\t\toutfiledim.write(pointerz1_res, bytesz1_res);\r\n\t\tconst char* pointerz1_ims = reinterpret_cast(&fz1_ims[0]);\r\n\t\tstd::size_t bytesz1_ims = fz1_ims.size() * sizeof(fz1_ims[0]);\r\n\t\toutfiledim.write(pointerz1_ims, bytesz1_ims);\r\n\r\n\t\tdvec fz2_res(na);\r\n\t\tdvec fz2_ims(na);\r\n\t\tfor (int jj = 0; jj < na; jj++)\r\n\t\t{\r\n\t\t\tfz2_res[jj] = real(z2a[jj]);\r\n\t\t\tfz2_ims[jj] = imag(z2a[jj]);\r\n\t\t}\r\n\t\tconst char* pointerz2_re = reinterpret_cast(&fz2_res[0]);\r\n\t\tstd::size_t bytesz2_re = fz2_res.size() * sizeof(fz2_res[0]);\r\n\t\toutfiledim.write(pointerz2_re, bytesz2_re);\r\n\t\tconst char* pointerz2_im = reinterpret_cast(&fz2_ims[0]);\r\n\t\tstd::size_t bytesz2_im = fz2_ims.size() * sizeof(fz2_ims[0]);\r\n\t\toutfiledim.write(pointerz2_im, bytesz2_im);\r\n\r\n\t}\r\n\r\n\t// Close the output files\r\n\toutfile.close();\r\n\toutfiledim.close();\r\n\r\n\t// Get the date and execution time\r\n\tauto end = std::chrono::high_resolution_clock::now();\r\n\tauto date_time3 = std::chrono::system_clock::now();\r\n\tauto elapsed_seconds = std::chrono::duration_cast(end - start);\r\n\tlong long mseconds = elapsed_seconds.count();\r\n\tlong long seconds, hours, minutes;\r\n\tstd::tie(mseconds, seconds, minutes, hours) = ms_to_time(mseconds);\r\n\tstd::time_t end_time = std::chrono::system_clock::to_time_t(date_time3);\r\n\tchar str_time[26];\r\n\tctime_s(str_time, sizeof str_time, &end_time);\r\n\r\n\tstd::cout << \"Program finnished after \";\r\n\ttime_print(mseconds, seconds, minutes, hours);\r\n\tstd::cout << std::endl;\r\n\tstd::cout << \"Output data saved to binary files: data.bin and dim_data.bin\" << std::endl;\r\n\r\n\treturn 0;\r\n}", "meta": {"hexsha": "c539f7d7be8dd983f59d0e034aa4e3c9e49afafb", 
"size": 61312, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "AE_LE_master.cpp", "max_stars_repo_name": "eriktoller/AE_LE_crack", "max_stars_repo_head_hexsha": "e8d8c067da2fd1d184e8926b8075fe9f58e3250e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "AE_LE_master.cpp", "max_issues_repo_name": "eriktoller/AE_LE_crack", "max_issues_repo_head_hexsha": "e8d8c067da2fd1d184e8926b8075fe9f58e3250e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "AE_LE_master.cpp", "max_forks_repo_name": "eriktoller/AE_LE_crack", "max_forks_repo_head_hexsha": "e8d8c067da2fd1d184e8926b8075fe9f58e3250e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2142857143, "max_line_length": 247, "alphanum_fraction": 0.5420472338, "num_tokens": 20282, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7745833945721304, "lm_q2_score": 0.6442251133170356, "lm_q1q2_score": 0.4990060751417248}} {"text": "#include \"potgen.hpp\"\n#include \"global.hpp\"\n#include \"profiling.hpp\"\n#include \"dynamic_grid.hpp\"\n#include \"fft.hpp\"\n#include \"multiindex.hpp\"\n#include \"discretize.hpp\"\n#include \"randomize.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n// defined here so can be inlined\ninline double pow_small(double base, int exponent)\n{\n switch(exponent)\n {\n case 0:\n return 1;\n case 1:\n return base;\n case 2:\n return base*base;\n case 3:\n return base*base*base;\n default:\n return std::pow(base, exponent);\n }\n}\n\n// -------------------------------------------------------------------------------------------------------------\n// calculate the derivative in position space if the fourier transform is given\n// this assumes that the original function was defined inside [-1, 1]^N\n/// takes order_per_dir by value to make calling from multiple threads easier.\n/// threads safe, if \\p f_k is not changed when function is run, otherwise undefined behaviour\ndefault_grid calculateDerivative( std::vector order_per_dir, const complex_grid& f_k )\n{\n std::size_t dimension = f_k.getDimension();\n // argument checks\n if(order_per_dir.size() != f_k.getDimension())\n THROW_EXCEPTION( std::invalid_argument, \"derivation index %1% count does not match data dimension %2%\", order_per_dir.size(), f_k.getDimension() );\n\n /// \\todo maybe just enable fft access and disable after the function...\n if( f_k.getAccessMode() != TransformationType::FFT_INDEX )\n THROW_EXCEPTION( std::invalid_argument, \"grid is not in fft index mode\" );\n\n /// \\todo this might throw. 
Catch and generate more detailed error message\n auto der_grid = f_k.clone();\n\n std::size_t total_order = std::accumulate( std::begin(order_per_dir), std::end(order_per_dir), (size_t)0 );\n for( unsigned i = 0; i < dimension; ++i)\n {\n if(order_per_dir[i] < 0)\n THROW_EXCEPTION( std::invalid_argument, \"negative order of derivative %1% supplied\", i );\n }\n\n // the actual calculation starts here\n {\n MultiIndex index( dimension );\n for(unsigned i = 0; i < dimension; ++i)\n {\n // get extents returns unsigned ints, so we need to explicitly convert so singed to apply the unary minus.\n index.setLowerBoundAt( i, -(int)(f_k.getExtents()[i]/2) );\n index.setUpperBoundAt( i, (int)f_k.getExtents()[i]/2 );\n }\n\n\n PROFILE_BLOCK(\"derivative calculation\");\n complex_t i_factor = std::pow( complex_t(0, pi), total_order );\n\n for(index.init() ;index.valid(); ++index)\n {\n /// \\todo add comment how/why this works\n // f'(k) = i k f(k)\n double r_factor = 1;\n // this is pushed into another function\n for(unsigned dir = 0; dir < dimension; ++dir)\n {\n // if no derivative in that direction, factor is 1 so no computation needed\n if(order_per_dir[dir] == 0)\n continue;\n\n r_factor *= pow_small(2*index[dir], order_per_dir[dir]);\n }\n\n der_grid(index) *= r_factor * i_factor;\n }\n }\n\n ifft(der_grid);\n\n default_grid result( f_k.getExtents(), TransformationType::FFT_INDEX );\n std::transform(der_grid.begin(), der_grid.end(), result.begin(), (double(*)(const complex_t&))&std::real);\n return std::move(result);\n}\n\n\n// -------------------------------------------------------------------------------------------------------------\n// first step of potential generation: generate the new potential in k - space\n// -------------------------------------------------------------------------------------------------------------\ncomplex_grid generatePotentialInKSpace( std::vector sizes, std::vector support, correlation_fn cor_fun, const PGOptions& opt )\n{\n // create 
discrete correlation function and fourier transform -> power spectrum\n // load discretized function data into correlation array\n auto grid = discretizeFunctionForFFT(sizes, support, cor_fun);\n\n /// \\todo logging\n\n // calculate fft of correlation\n fft(grid);\n\n // calculate randomize potential in momentum space\n // potential is square root of power spectrum\n {\n PROFILE_BLOCK(\"power spectrum\")\n\n for(auto& v : grid )\n {\n double real = std::real(v);\n /// \\todo actually measure the error here and return it in PGResult\n if( real < -1e-5 || std::abs(std::imag(v)) > 1e-5)\n {\n THROW_EXCEPTION( std::runtime_error, \"power spectrum contains negative or imaginary components, check correlation function!\" );\n }\n // it is faster (and better?) to use v as a nonnegative real here for sqrt calculation\n v = real < 0 ? 0 : std::sqrt(real);\n }\n }\n\n // randomize phases\n if( opt.randomize )\n {\n randomizePhases(grid, opt.randomSeed);\n }\n\n return std::move(grid);\n}\n\n\n// -------------------------------------------------------------------------------------------------------------\n// takes a potential in k-space and calculates all requested derivatives, stores inside a Potential datatype\n/// \\todo write tests for this function\n\nvoid calculateAllDerivatives(Potential &potential, const complex_grid &potential_k, unsigned int max_order)\n{\n PROFILE_BLOCK(\"calculate all derivatives\");\n\n typedef std::future f_type;\n std::vector started_tasks;\n std::vector task_orders;\n\n // calculation function\n auto calc_deriv = [&potential_k](MultiIndex order)\n {\n auto deriv = calculateDerivative(order.getAsVector(), potential_k);\n\n // use same scale factor as for potential\n std::size_t vec_element_count = potential_k.getElementCount();\n double factor = std::sqrt(vec_element_count);\n scaleVectorBy( deriv, factor );\n return std::move(deriv);\n };\n\n for(MultiIndex order( potential.getDimension(), 0, max_order + 1 ); order.valid(); ++order)\n {\n // 
check total order of derivative\n std::size_t total_order = order.getAccumulated();\n if(total_order <= max_order && total_order > 0)\n {\n started_tasks.push_back( std::async(std::launch::async, calc_deriv, order) );\n task_orders.push_back( order );\n }\n }\n\n for(unsigned i = 0; i < task_orders.size(); ++i)\n {\n try\n {\n potential.setDerivative( task_orders[i], std::move(started_tasks[i].get()) );\n } catch (const std::bad_alloc& e)\n {\n std::cerr << \"bad alloc called in multi threaded derivative calculation. probably ran out of memory. \"\n \"retry as sequential calculation to reduce memory footprint.\";\n\n potential.setDerivative( task_orders[i], std::move( calc_deriv(task_orders[i]) ) );\n }\n }\n}\n\n\n// -------------------------------------------------------------------------------------------------------------\n\nPotential generatePotential( std::vector sizes, std::vector support, const PGOptions& opt )\n{\n Potential res(sizes, std::vector(sizes.size(), 1.0));\n res.setCreationInfo(opt.randomSeed, 3, opt.corrlength);\n\n // setup threads\n setFFTThreads(opt.numThreads);\n\n // calculate the potential in k-space\n auto potential_k = generatePotentialInKSpace(sizes, support, opt.cor_fun, opt);\n\n // calculate derivatives in k-space\n calculateAllDerivatives( res, potential_k, opt.maxDerivativeOrder );\n\n std::size_t vec_element_count = potential_k.getElementCount();\n\n // calculate potential in position space\n auto& cpotential_x = potential_k;\n ifft(cpotential_x);\n\n // this requires additional memory again\n /// \\todo actually measure the error here and return it in PGResult\n double averageComplexPart = 0;\n double average = 0;\n\n default_grid potential_x(sizes, TransformationType::IDENTITY);\n\n auto it = potential_x.begin();\n for( auto& value : cpotential_x)\n {\n *it = std::real(value);\n ++it;\n\n average += std::real( value );\n averageComplexPart += std::imag( value );\n }\n\n average /= vec_element_count;\n averageComplexPart /= 
vec_element_count;\n\n // calculate average and shift\n double variance = 0.0;\n for( auto& d : potential_x )\n {\n d -= average;\n variance += d*d;\n }\n\n if(opt.verbose)\n std::cout << \"original quality: \" << average << \" \" << variance << \"\\n\";\n res.scalePotential( std::sqrt(1.0/variance) );\n\n scaleVectorBy( potential_x, std::sqrt(vec_element_count / variance) );\n // take scaling into account\n if( opt.verbose )\n std::cout << \"the average imaginary component in the result was \" << averageComplexPart * std::sqrt(vec_element_count / variance)<< \"\\n\";\n\n res.setPotential( std::move(potential_x) );\n\n res.setSupport( support );\n return std::move(res);\n}\n\n", "meta": {"hexsha": "d245d518bc04e2e0ee95623ad53ee54b79cd3487", "size": 9167, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/potgen/potgen.cpp", "max_stars_repo_name": "ngc92/branchedflowsim", "max_stars_repo_head_hexsha": "d38c0e7f892d07d0abd9b63d30570c41b3b83b34", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/potgen/potgen.cpp", "max_issues_repo_name": "ngc92/branchedflowsim", "max_issues_repo_head_hexsha": "d38c0e7f892d07d0abd9b63d30570c41b3b83b34", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/potgen/potgen.cpp", "max_forks_repo_name": "ngc92/branchedflowsim", "max_forks_repo_head_hexsha": "d38c0e7f892d07d0abd9b63d30570c41b3b83b34", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.122605364, "max_line_length": 151, "alphanum_fraction": 0.6007417912, "num_tokens": 2018, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7745833841649233, "lm_q2_score": 0.6442251201477016, "lm_q1q2_score": 0.49900607372806105}} {"text": "#include \n#include \n#include \"geometrycentral/surface/manifold_surface_mesh.h\"\n#include \"geometrycentral/utilities/mesh_data.h\"\n#include \"geometrycentral/surface/edge_length_geometry.h\"\n#include \"geometrycentral/surface/heat_method_distance.h\"\n#include \n#include \n#include \n\nusing namespace geometrycentral;\nusing namespace geometrycentral::surface;\ntypedef int iptr_t;\ntypedef int dptr_t;\n\n// mesh data\nstatic Eigen::MatrixX3i faces;\nstatic Eigen::MatrixX3d edges;\nstatic EdgeData edgeLengths;\n\n// mesh containers\nstatic std::unique_ptr mesh;\nstatic std::unique_ptr geometry;\n\n// the Heat Method solver\nstatic std::unique_ptr heatSolver;\n\n// the output vertex data\nstatic VertexData distToSource;\n\n// parameters\nstatic double timeStep = 1.0;\nstatic bool robust = false;\nstatic bool verbose = true;\n\nstd::string getExceptionMessage(intptr_t exceptionPtr) {\n return std::string(reinterpret_cast(exceptionPtr)->what());\n}\n\nEMSCRIPTEN_BINDINGS(Bindings) {\n emscripten::function(\"getExceptionMessage\", &getExceptionMessage);\n};\n\nextern \"C\" {\n\n EMSCRIPTEN_KEEPALIVE\n iptr_t allocate_faces(size_t num_faces){\n faces.resize(num_faces, 3);\n edges.resize(num_faces, 3);\n return reinterpret_cast(&faces(0, 0));\n }\n EMSCRIPTEN_KEEPALIVE\n void set_face(size_t f, size_t idx0, size_t idx1, size_t idx2){\n faces(f, 0) = idx0;\n faces(f, 1) = idx1;\n faces(f, 2) = idx2;\n }\n EMSCRIPTEN_KEEPALIVE\n void set_face_edges(size_t f, double e0, double e1, double e2){\n edges(f, 0) = e0;\n edges(f, 1) = e1;\n edges(f, 2) = e2;\n }\n EMSCRIPTEN_KEEPALIVE\n void print_faces(){\n std::cout << \"Faces:\\n\" << faces << \"\\n\";\n }\n EMSCRIPTEN_KEEPALIVE\n dptr_t get_edge_ptr(){\n return reinterpret_cast(&edges(0, 0));\n }\n EMSCRIPTEN_KEEPALIVE\n void print_edges(){\n std::cout << \"Edges:\\n\" << edges << \"\\n\";\n }\n\n 
EMSCRIPTEN_KEEPALIVE\n iptr_t allocate_edges(size_t num_edges){\n edges.resize(num_edges, 3);\n return reinterpret_cast(&edges(0, 0));\n }\n\n EMSCRIPTEN_KEEPALIVE\n void set_verbose(bool v = true){\n verbose = v;\n }\n EMSCRIPTEN_KEEPALIVE\n void set_quiet(){\n set_verbose(false);\n }\n EMSCRIPTEN_KEEPALIVE\n void set_time_step(double step){\n timeStep = step;\n }\n EMSCRIPTEN_KEEPALIVE\n void set_robust(bool flag){\n robust = flag;\n }\n\n EMSCRIPTEN_KEEPALIVE\n void create_surface_mesh(){\n // create underlying mesh topology\n mesh.reset(new ManifoldSurfaceMesh(faces));\n mesh->compress();\n if(verbose)\n mesh->printStatistics();\n }\n\n EMSCRIPTEN_KEEPALIVE \n void precompute(){\n\n // create implicit geometry using edge lengths and mesh\n edgeLengths = EdgeData(*mesh);\n for(size_t i = 0; i < faces.rows(); ++i){\n if(verbose)\n printf(\"Setting lengths of face #%zu\\n\", i);\n Face f = mesh->face(i);\n if(!f.isTriangle()){\n printf(\"Face is not a triangle\\n\");\n return;\n }\n Halfedge he = f.halfedge(); edgeLengths[he.edge()] = edges(i, 0);\n he = he.next(); edgeLengths[he.edge()] = edges(i, 1);\n he = he.next(); edgeLengths[he.edge()] = edges(i, 2);\n }\n if(verbose){\n std::cout << \"Edges:\\n\" << edgeLengths.raw() << \"\\n\";\n for(Edge e : mesh->edges()){\n printf(\"Edge #%zu = %g\\n\", e.getIndex(), edgeLengths[e]);\n }\n }\n geometry.reset(new EdgeLengthGeometry(*mesh, edgeLengths));\n\n // create heat method distance solver (precomputation happens here)\n heatSolver.reset(new HeatMethodDistanceSolver(*geometry, timeStep, robust));\n }\n\n EMSCRIPTEN_KEEPALIVE\n dptr_t compute_from_source(size_t srcIndex){\n const Vertex v = mesh->vertex(srcIndex);\n distToSource = heatSolver->computeDistance(v);\n\n if(verbose)\n printf(\"Returning result pointer\\n\");\n\n Eigen::VectorXd &mat = distToSource.raw();\n double* ptr = &mat(0, 0);\n return reinterpret_cast(ptr);\n }\n\n}", "meta": {"hexsha": "3ed87792df857a53153bcfa7325d15f40e6d2b17", "size": 4047, 
"ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/geodesic-dist/gdist.cpp", "max_stars_repo_name": "xionluhnis/knitsketching", "max_stars_repo_head_hexsha": "3e13670caa911b6d5e3c9c036c0ee15184e7d138", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2021-08-16T05:19:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-09T17:46:12.000Z", "max_issues_repo_path": "libs/geodesic-dist/gdist.cpp", "max_issues_repo_name": "xionluhnis/knitsketching", "max_issues_repo_head_hexsha": "3e13670caa911b6d5e3c9c036c0ee15184e7d138", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5.0, "max_issues_repo_issues_event_min_datetime": "2021-08-25T07:11:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T09:16:05.000Z", "max_forks_repo_path": "libs/geodesic-dist/gdist.cpp", "max_forks_repo_name": "xionluhnis/knitsketching", "max_forks_repo_head_hexsha": "3e13670caa911b6d5e3c9c036c0ee15184e7d138", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2021-08-12T11:49:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-19T07:51:53.000Z", "avg_line_length": 26.8013245033, "max_line_length": 81, "alphanum_fraction": 0.6849518162, "num_tokens": 1113, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7745833945721303, "lm_q2_score": 0.6442251064863697, "lm_q1q2_score": 0.49900606985080437}} {"text": "//\n// Copyright (c) 2016,2018 CNRS\n//\n\n#ifndef __pinocchio_math_fwd_hpp__\n#define __pinocchio_math_fwd_hpp__\n\n#include \"pinocchio/fwd.hpp\"\n#include \n#include \n#include \"pinocchio/math/sincos.hpp\"\n\n#ifdef PINOCCHIO_WITH_CPPAD_SUPPORT\nnamespace boost\n{\n namespace math\n {\n namespace constants\n {\n namespace detail\n {\n template\n struct constant_pi< CppAD::AD > : constant_pi {};\n \n#if defined(PINOCCHIO_WITH_CPPADCG_SUPPORT) && defined(PINOCCHIO_WITH_CXX11_SUPPORT)\n template\n struct constant_pi< CppAD::cg::CG > : constant_pi {};\n#endif\n }\n }\n }\n}\n#endif\n\nnamespace pinocchio\n{\n ///\n /// \\brief Returns the value of PI according to the template parameters Scalar\n ///\n /// \\tparam Scalar The scalar type of the return pi value\n ///\n template\n const Scalar PI()\n { return boost::math::constants::pi(); }\n \n /// The value of PI for double scalar type\n const double PId = PI();\n \n namespace math\n {\n using std::fabs;\n using std::sqrt;\n using std::atan;\n using std::acos;\n using std::asin;\n using std::pow;\n using std::cos;\n using std::sin;\n \n#ifdef PINOCCHIO_WITH_CPPAD_SUPPORT\n using CppAD::fabs;\n using CppAD::sqrt;\n using CppAD::atan;\n using CppAD::acos;\n using CppAD::asin;\n using CppAD::atan2;\n using CppAD::pow;\n using CppAD::cos;\n using CppAD::sin;\n#else\n using std::atan2;\n#endif\n }\n}\n\n#endif //#ifndef __pinocchio_math_fwd_hpp__\n", "meta": {"hexsha": "04b940add52dcf3b9c09ca3e7460e3488f95caf8", "size": 1610, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/math/fwd.hpp", "max_stars_repo_name": "matthieuvigne/pinocchio", "max_stars_repo_head_hexsha": "01f211eceda3ac2e5edc8cf101690afb6f3184d3", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-06-22T15:42:45.000Z", 
"max_stars_repo_stars_event_max_datetime": "2021-06-22T15:42:45.000Z", "max_issues_repo_path": "src/math/fwd.hpp", "max_issues_repo_name": "matthieuvigne/pinocchio", "max_issues_repo_head_hexsha": "01f211eceda3ac2e5edc8cf101690afb6f3184d3", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/math/fwd.hpp", "max_forks_repo_name": "matthieuvigne/pinocchio", "max_forks_repo_head_hexsha": "01f211eceda3ac2e5edc8cf101690afb6f3184d3", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-03-21T09:14:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-21T09:14:26.000Z", "avg_line_length": 20.9090909091, "max_line_length": 84, "alphanum_fraction": 0.6633540373, "num_tokens": 437, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7745833737577158, "lm_q2_score": 0.6442251201477016, "lm_q1q2_score": 0.49900606702347655}} {"text": "#include \n#include \n#include \n\n#include \n\ntypedef CGAL::Exact_predicates_exact_constructions_kernel K;\ntypedef K::Point_2 Point;\ntypedef K::Segment_2 Segment;\n\ntypedef CGAL::Creator_uniform_2 Pt_creator;\ntypedef CGAL::Random_points_on_segment_2 P1;\ntypedef CGAL::Random_points_on_circle_2 P2;\ntypedef CGAL::Creator_uniform_2< Point, Segment> Seg_creator;\ntypedef CGAL::Join_input_iterator_2< P1, P2, Seg_creator> Seg_iterator;\n\nstruct Intersector{\n typedef CGAL::cpp11::result_of::type result_type;\n const Segment& s;\n K::Intersect_2 intersect;\n\n Intersector(const Segment& seg): s(seg) {}\n\n result_type\n operator() ( const Segment& other) const\n {\n return intersect(s, other);\n }\n};\n\nint main()\n{\n std::vector input;\n\n // Prepare point generator for the horizontal segment, length 200.\n P1 p1( Point(-100,0), Point(100,0));\n\n // Prepare point generator for random 
points on circle, radius 250.\n P2 p2( 250);\n\n // Create segments.\n Seg_iterator g( p1, p2);\n std::copy_n( g, 200, std::back_inserter(input));\n\n\n // splitting results with Dispatch_output_iterator\n std::vector points;\n std::vector segments;\n\n typedef CGAL::Dispatch_output_iterator<\n std::tuple, std::tuple< std::back_insert_iterator >,\n std::back_insert_iterator > > >\n Dispatcher;\n\n Dispatcher disp = CGAL::dispatch_output( std::back_inserter(points),\n std::back_inserter(segments) );\n\n // intersects the first segment of input with all other segments\n // The resulting points or segments are written in the vectors with the same names\n std::transform( input.begin(), input.end(), disp,\n Intersector(input.front()) );\n\n\n std::cout << \"Point intersections: \" << points.size() << std::endl;\n std::cout << \"Segment intersections: \" << segments.size() << std::endl;\n\n\n return 0;\n}\n", "meta": {"hexsha": "283fd5b9aada7804969b838c9f505e33ca69dc2e", "size": 2252, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Kernel_23/examples/Kernel_23/intersections.cpp", "max_stars_repo_name": "gaschler/cgal", "max_stars_repo_head_hexsha": "d1fe2afa18da5524db6d4946f42ca4b8d00e0bda", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-12-12T09:30:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-04T05:00:23.000Z", "max_issues_repo_path": "Kernel_23/examples/Kernel_23/intersections.cpp", "max_issues_repo_name": "gaschler/cgal", "max_issues_repo_head_hexsha": "d1fe2afa18da5524db6d4946f42ca4b8d00e0bda", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Kernel_23/examples/Kernel_23/intersections.cpp", "max_forks_repo_name": "gaschler/cgal", "max_forks_repo_head_hexsha": "d1fe2afa18da5524db6d4946f42ca4b8d00e0bda", "max_forks_repo_licenses": 
["CC0-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-12-02T11:11:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-02T11:11:36.000Z", "avg_line_length": 32.1714285714, "max_line_length": 99, "alphanum_fraction": 0.6758436945, "num_tokens": 552, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7745833841649233, "lm_q2_score": 0.6442250928250375, "lm_q1q2_score": 0.4990060525643794}} {"text": "//==================================================================================================\n/*!\n\n Copyright 2016 NumScale SAS\n\n Distributed under the Boost Software License, Version 1.0.\n (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)\n*/\n//==================================================================================================\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nSTF_CASE_TPL (\" sqrt1pm1\", STF_IEEE_TYPES)\n{\n namespace bs = boost::simd;\n namespace bd = boost::dispatch;\n using bs::sqrt1pm1;\n\n using r_t = decltype(sqrt1pm1(T()));\n\n // return type conformity test\n STF_TYPE_IS(r_t, T);\n\n // specific values tests\n#ifndef BOOST_SIMD_NO_INVALIDS\n STF_ULP_EQUAL(sqrt1pm1(bs::Inf()), bs::Inf(), 0);\n STF_ULP_EQUAL(sqrt1pm1(bs::Minf()), bs::Nan(), 0);\n STF_ULP_EQUAL(sqrt1pm1(bs::Nan()), bs::Nan(), 0);\n#endif\n STF_ULP_EQUAL(sqrt1pm1(bs::Mone()), bs::Mone(), 0);\n STF_ULP_EQUAL(sqrt1pm1(bs::One()), bs::Sqrt_2()-bs::One(), 2);\n STF_ULP_EQUAL(sqrt1pm1(bs::Zero()), bs::Zero(), 0);\n STF_ULP_EQUAL(sqrt1pm1(bs::Eps()), bs::Halfeps(), 0.5);\n}\n", "meta": {"hexsha": "6154005534ceca9575f82549efaa4a4172ab8894", "size": 1610, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/function/scalar/sqrt1pm1.cpp", "max_stars_repo_name": "TobiasLudwig/boost.simd", "max_stars_repo_head_hexsha": "c04d0cc56747188ddb9a128ccb5715dd3608dbc1", 
"max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2018-02-25T22:23:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T15:13:12.000Z", "max_issues_repo_path": "test/function/scalar/sqrt1pm1.cpp", "max_issues_repo_name": "remymuller/boost.simd", "max_issues_repo_head_hexsha": "3caefb7ee707e5f68dae94f8f31f72f34b7bb5de", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/function/scalar/sqrt1pm1.cpp", "max_forks_repo_name": "remymuller/boost.simd", "max_forks_repo_head_hexsha": "3caefb7ee707e5f68dae94f8f31f72f34b7bb5de", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-12-12T12:36:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-10T14:27:07.000Z", "avg_line_length": 35.7777777778, "max_line_length": 100, "alphanum_fraction": 0.6149068323, "num_tokens": 472, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7310585669110202, "lm_q2_score": 0.6825737344123242, "lm_q1q2_score": 0.49900137609057704}} {"text": "// Copyright 2017 Rainer Gemulla\n// \n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n// \n// http://www.apache.org/licenses/LICENSE-2.0\n// \n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n/** \\file\n *\n * Illustrates matrix factorization with SGD. We first creates factors and then a data matrix\n * from these factors. 
THis process ensures that we know the best factorization of the input.\n * We then try to reconstruct the factors using SGD.\n */\n#include \n\n#include \n#include \n\n#include \n#include \n\n#include \n\nusing namespace std;\nusing namespace mf;\nusing namespace rg;\nusing namespace boost::numeric::ublas;\n\nlog4cxx::LoggerPtr logger(log4cxx::Logger::getLogger(\"main\"));\n\nint main(int argc, char* argv[]) {\n#ifndef NDEBUG\n\tLOG4CXX_WARN(logger, \"Warning: Debug mode activated (runtimes may be slow).\");\n#endif\n\n\t// parameters for the factorization\n\tmf_size_type size1 = 10000;\n\tmf_size_type size2 = 10000;\n\tmf_size_type nnz = 1000000;\n\tdouble sigma = 1; // standard deviation\n\tdouble lambda = 1/sigma/sigma;\n\tmf_size_type r = 10;\n\n\t// parameters for SGD\n\tdouble eps0 = 0.1;\n\tunsigned epochs = 20;\n\tSgdOrder order = SGD_ORDER_WOR;\n\ttypedef UpdateTruncate Update;\n\ttypedef RegularizeL2 Regularize;\n\ttypedef SumLoss Loss;\n\ttypedef NzslLoss TestLoss;\n\tUpdate update = Update(UpdateNzsl(), -10*sigma, 10*sigma); // truncate for numerical stability\n\tRegularize regularize = Regularize(lambda);\n\tLoss loss((NzslLoss()), L2Loss(lambda));\n\tTestLoss testLoss;\n\tmf_size_type testNnz = nnz/100;\n\tBalanceType balanceType = BALANCE_NONE;\n\tBalanceMethod balanceMethod = BALANCE_SIMPLE;\n\n\n\t// generate original factors by sampling from a normal(0,sigma) distribution\n\tRandom32 random; // note: this takes a default seed (not randomized!)\n\tDenseMatrix wIn(size1, r);\n\tDenseMatrixCM hIn(r, size2);\n\tgenerateRandom(wIn, random, boost::normal_distribution<>(0, sigma));\n\tgenerateRandom(hIn, random, boost::normal_distribution<>(0, sigma));\n\n\t// generate a sparse matrix by selecting random entries from the generated factors\n\t// and add small Gaussian noise\n\tSparseMatrix v;\n\tgenerateRandom(v, nnz, wIn, hIn, random);\n\taddRandom(v, random, boost::normal_distribution<>(0, 0.1));\n\tLOG4CXX_INFO(logger, \"Data matrix: \"\n\t\t<< 
v.size1() << \" x \" << v.size2() << \", \" << v.nnz() << \" nonzeros\");\n\tLOG4CXX_INFO(logger, \"Loss with original factors: \" << loss((FactorizationData<>(v, wIn, hIn))));\n\n\t// create a test matrix (without noise)\n\tSparseMatrix vTest;\n\tgenerateRandom(vTest, testNnz, wIn, hIn, random);\n\tLOG4CXX_INFO(logger, \"Test matrix: \"\n\t\t<< v.size1() << \" x \" << v.size2() << \", \" << vTest.nnz() << \" nonzeros\");\n\n\t// take a small sample and remove empty rows/columns\n\tProjectedSparseMatrix Vsample;\n\tprojectRandomSubmatrix(random, v, Vsample, v.size1()/5, v.size2()/5);\n\tprojectFrequent(Vsample, 0);\n\tLOG4CXX_INFO(logger, \"Sample matrix: \"\n\t\t<< Vsample.data.size1() << \" x \" << Vsample.data.size2()\n\t\t<< \", \" << Vsample.data.nnz() << \" nonzeros\");\n\n\t// generate initial factors by sampling from a uniform[-0.5,0.5] distribution\n\tDenseMatrix w(size1, r);\n\tDenseMatrixCM h(r, size2);\n\tgenerateRandom(w, random, boost::uniform_real<>(-0.5, 0.5));\n\tgenerateRandom(h, random, boost::uniform_real<>(-0.5, 0.5));\n\n\t// initialize the SGD\n\tTimer t;\n\tSgdRunner sgdRunner(random);\n\tSgdJob job(v, w, h, update, regularize, order);\n\tFactorizationData<> testJob(vTest, w, h);\n\tDecayAuto decay(job, loss, Vsample, eps0, 4);\n\tTrace trace;\n\n\t// print the test loss\n\tLOG4CXX_INFO(logger, \"Initial test loss: \"\n\t\t\t<< testLoss(FactorizationData<>(vTest,w,h)));\n\n\t// run SGD to try to reconstruct the original factors\n\tt.start();\n\tsgdRunner.run(job, loss, epochs, decay, trace, balanceType, balanceMethod, &testJob, &testLoss);\n\tt.stop();\n\tLOG4CXX_INFO(logger, \"Total time: \" << t);\n\n\t// print the test loss\n\tLOG4CXX_INFO(logger, \"Final test loss: \"\n\t\t\t<< testLoss(FactorizationData<>(vTest,w,h)));\n\n\t// write trace to an R file\n\tLOG4CXX_INFO(logger, \"Writing trace to \" << \"/tmp/sgd-trace.R\");\n\ttrace.toRfile(\"/tmp/sgd-trace.R\", \"sgd\");\n\n\treturn 0;\n}\n\n", "meta": {"hexsha": 
"1e83a71f1d64e0574831584f1e27a5b764e81212", "size": 4646, "ext": "cc", "lang": "C++", "max_stars_repo_path": "examples/mf/sgd.cc", "max_stars_repo_name": "Hui-Li/DSGDPP", "max_stars_repo_head_hexsha": "0ce5b115bfbed81cee1c39fbfa4a8f67a5e1b72e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14.0, "max_stars_repo_stars_event_min_datetime": "2017-01-10T11:39:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-02T23:03:55.000Z", "max_issues_repo_path": "examples/mf/sgd.cc", "max_issues_repo_name": "Hui-Li/DSGDPP", "max_issues_repo_head_hexsha": "0ce5b115bfbed81cee1c39fbfa4a8f67a5e1b72e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/mf/sgd.cc", "max_forks_repo_name": "Hui-Li/DSGDPP", "max_forks_repo_head_hexsha": "0ce5b115bfbed81cee1c39fbfa4a8f67a5e1b72e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2017-10-27T18:40:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-05T15:10:56.000Z", "avg_line_length": 35.196969697, "max_line_length": 98, "alphanum_fraction": 0.713086526, "num_tokens": 1299, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7310585669110203, "lm_q2_score": 0.6825737279551494, "lm_q1q2_score": 0.49900137137000417}} {"text": "//////////////////////////////////////////////////////////////////////////////\n/// Copyright 2003 and onward LASMEA UMR 6602 CNRS/U.B.P Clermont-Ferrand\n/// Copyright 2009 and onward LRI UMR 8623 CNRS/Univ Paris Sud XI\n///\n/// Distributed under the Boost Software License, Version 1.0\n/// See accompanying file LICENSE.txt or copy at\n/// http://www.boost.org/LICENSE_1_0.txt\n//////////////////////////////////////////////////////////////////////////////\n#ifndef NT2_TOOLBOX_IEEE_FUNCTION_SCALAR_NEXTPOW2_HPP_INCLUDED\n#define NT2_TOOLBOX_IEEE_FUNCTION_SCALAR_NEXTPOW2_HPP_INCLUDED\n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\n\n/////////////////////////////////////////////////////////////////////////////\n// Implementation when type A0 is arithmetic_\n/////////////////////////////////////////////////////////////////////////////\nNT2_REGISTER_DISPATCH(tag::nextpow2_, tag::cpu_,\n (A0),\n (arithmetic_)\n )\n\nnamespace nt2 { namespace ext\n{\n template\n struct call : callable\n {\n template struct result;\n template\n struct result :\n meta::as_integer::type, signed>{};\n\n NT2_FUNCTOR_CALL(1)\n {\n return nt2::nextpow2(tofloat(a0));\n }\n };\n} }\n\n/////////////////////////////////////////////////////////////////////////////\n// Implementation when type A0 is real_\n/////////////////////////////////////////////////////////////////////////////\nNT2_REGISTER_DISPATCH(tag::nextpow2_, tag::cpu_,\n (A0),\n (real_)\n )\n\nnamespace nt2 { namespace ext\n{\n template\n struct call : callable\n {\n template struct result;\n template\n struct result :\n meta::as_integer::type, signed>{};\n\n NT2_FUNCTOR_CALL(1)\n {\n typedef typename meta::as_integer::type int_type;\n A0 m;\n int_type p;\n boost::fusion::tie(m, p) = nt2::frexp(nt2::abs(a0));\n return (m == Half()) ? 
minusone(p) : p;\n }\n };\n} }\n\n#endif\n// modified by jt the 26/12/2010", "meta": {"hexsha": "00c625b5ddb78a058b09da82d1707f6be4c00b2b", "size": 2769, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "modules/ieee/include/nt2/toolbox/ieee/function/scalar/nextpow2.hpp", "max_stars_repo_name": "brycelelbach/nt2", "max_stars_repo_head_hexsha": "73d7e8dd390fa4c8d251c6451acdae65def70e0b", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-03-24T03:35:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T03:35:10.000Z", "max_issues_repo_path": "modules/ieee/include/nt2/toolbox/ieee/function/scalar/nextpow2.hpp", "max_issues_repo_name": "brycelelbach/nt2", "max_issues_repo_head_hexsha": "73d7e8dd390fa4c8d251c6451acdae65def70e0b", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/ieee/include/nt2/toolbox/ieee/function/scalar/nextpow2.hpp", "max_forks_repo_name": "brycelelbach/nt2", "max_forks_repo_head_hexsha": "73d7e8dd390fa4c8d251c6451acdae65def70e0b", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6125, "max_line_length": 89, "alphanum_fraction": 0.5258215962, "num_tokens": 611, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7310585669110202, "lm_q2_score": 0.6825737214979745, "lm_q1q2_score": 0.49900136664943107}} {"text": "/*\n This program is free software; you can redistribute it and/or modify it under\n the terms of the European Union Public Licence - EUPL v.1.1 as published by\n the European Commission.\n\n This program is distributed in the hope that it will be useful, but WITHOUT\n ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n FOR A PARTICULAR PURPOSE. See the European Union Public Licence - EUPL v.1.1\n for more details.\n\n You should have received a copy of the European Union Public Licence - EUPL v.1.1\n along with this program.\n\n Further information about the European Union Public Licence - EUPL v.1.1 can\n also be found on the world wide web at http://ec.europa.eu/idabc/eupl\n\n*/\n\n/*\n ------ Copyright (C) 2011 STA Steering Board (space.trajectory.analysis AT gmail.com) ----\n*/\n/*\n------------------ Author: Guillermo Ortega ----------------------------------------\n May 2011\n\n */\n\n#include \"serviceDistanceRateUnit.h\"\n\n#include \n\nusing namespace Eigen;\n\n#include \"QDebug\"\n\nDialogServiceDistanceRateUnitFrame::DialogServiceDistanceRateUnitFrame( QWidget * parent, Qt::WindowFlags f) : QFrame(parent,f)\n{\n\tsetupUi(this);\n distanceRateUnitWidget = DialogServiceDistanceRateUnitFrame::comboBoxDistanceRateUnitsChoice;\n myPastUnits = 0;\n comboBoxDistanceRateUnitsChoice->setCurrentIndex(myPastUnits);\n}\n\nDialogServiceDistanceRateUnitFrame::~DialogServiceDistanceRateUnitFrame()\n{\n}\n\n\n// Index meaning is as follows:\n// index = 0 is Kilometers/s\n// index = 1 is meters/s\n// index = 2 is centi-meters/s\n// index = 3 is mili-meters/s\n// index = 4 is Astronomical Units/s\n\n\n\n// Matrix coefficients as follows\n// Km->Km, Km->m, Km->cm, Km->mm, Km->AU,\n// m->Km, m->m, m->cm, m->mm, m->AU,\n// cm->Km, cm->m, cm->cm, cm->mm, cm-> AU,\n// mm->Km, mm->m, mm->cm, mm->mm, mm->AU,\n// AU->Km, AU->m, 
AU->cm, AU->mm, AU->AU\n// The matrice has to be transposed !!\n\n\nstatic double distanceRateConversionMatrixCoeffs[25] =\n{1.0, 0.001, 0.00001, 0.000001, 1.495978707e+08,\n 1000.0, 1.0, 0.01, 0.001, 1.495978707e+11,\n 100000.0, 100.0, 1.0, 0.1, 1.495978707e+13,\n 1000000.0, 1000.0, 10.0, 1.0, 1.495978707e+14,\n 0.6684587e-19, 0.6684587e-12, 0.6684587e-13, 0.6684587e-16, 1.0};\n\nstatic const Matrix distanceRateConversionMatrix(distanceRateConversionMatrixCoeffs);\n\n\n\ndouble DialogServiceDistanceRateUnitFrame::convertDistanceRate(int fromDistanceRateUnit, int toDistanceRateUnit, double distance)\n{\n double finalDistanceRate = distance * distanceRateConversionMatrix(fromDistanceRateUnit, toDistanceRateUnit);\n return finalDistanceRate;\n}\n\n\n//// Sets the input distance, the output distance and the current index inside the method\nvoid DialogServiceDistanceRateUnitFrame::setInputDistanceRate(double niceInputDistanceRate)\n{\n myPastDistanceRate = niceInputDistanceRate;\n}\n\n\n\n// Index meaning is as follows:\n// index = 0 is Kilometers/s\n// index = 1 is meters/s\n// index = 2 is centi-meters/s\n// index = 3 is mili-meters/s\n// index = 4 is Astronomical Units/s\nvoid DialogServiceDistanceRateUnitFrame::on_comboBoxDistanceRateUnitsChoice_currentIndexChanged(int myIndex)\n{\n myFutureUnits = myIndex;\n myFutureDistanceRate = convertDistanceRate(myPastUnits, myFutureUnits, myPastDistanceRate);\n myPastDistanceRate = myFutureDistanceRate;\n myPastUnits = myFutureUnits;\n myRealDistanceRateForXMLSchema = convertDistanceRate (myPastUnits, 0, myFutureDistanceRate);\n}\n\n", "meta": {"hexsha": "94252ab20a13b02030623f1eda0a584d6c2ade28", "size": 3591, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "sta-src/Services/serviceDistanceRateUnit.cpp", "max_stars_repo_name": "hoehnp/SpaceDesignTool", "max_stars_repo_head_hexsha": "9abd34048274b2ce9dbbb685124177b02d6a34ca", "max_stars_repo_licenses": ["IJG"], "max_stars_count": 6.0, 
"max_stars_repo_stars_event_min_datetime": "2018-09-05T12:41:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-01T05:34:23.000Z", "max_issues_repo_path": "sta-src/Services/serviceDistanceRateUnit.cpp", "max_issues_repo_name": "hoehnp/SpaceDesignTool", "max_issues_repo_head_hexsha": "9abd34048274b2ce9dbbb685124177b02d6a34ca", "max_issues_repo_licenses": ["IJG"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2015-02-07T19:09:21.000Z", "max_issues_repo_issues_event_max_datetime": "2015-08-14T03:15:42.000Z", "max_forks_repo_path": "sta-src/Services/serviceDistanceRateUnit.cpp", "max_forks_repo_name": "hoehnp/SpaceDesignTool", "max_forks_repo_head_hexsha": "9abd34048274b2ce9dbbb685124177b02d6a34ca", "max_forks_repo_licenses": ["IJG"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2015-03-25T15:50:31.000Z", "max_forks_repo_forks_event_max_datetime": "2017-12-06T12:16:47.000Z", "avg_line_length": 33.25, "max_line_length": 129, "alphanum_fraction": 0.701754386, "num_tokens": 1011, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7577943712746406, "lm_q2_score": 0.6584175139669997, "lm_q1q2_score": 0.4989450860328344}} {"text": "/* Copyright (c) 2010-2018, Delft University of Technology\n * All rigths reserved\n *\n * This file is part of the Tudat. Redistribution and use in source and\n * binary forms, with or without modification, are permitted exclusively\n * under the terms of the Modified BSD license. You should have received\n * a copy of the license with this file. If not, please or visit:\n * http://tudat.tudelft.nl/LICENSE.\n *\n * References\n * Montenbruck, O., Gill, E. Satellite Orbits: Models, Methods, Applications, Springer, 2005.\n * The Mathworks, Inc. DOPRI78, Symbolic Math Toolbox, 2012.\n *\n * Notes\n * All the test for this integrator are based on the data generated using the Symbolic Math\n * Toolbox (MathWorks, 2012). 
Ideally, another source of data should be used to complete the\n * testing.\n *\n * The single step and full integration error tolerances were picked to be as small as\n * possible, without causing the tests to fail. These values are not deemed to indicate any\n * bugs in the code; however, it is important to take these discrepancies into account when\n * using this numerical integrator.\n *\n */\n\n#define BOOST_TEST_MAIN\n\n#include \n#include \n\n#include \"Tudat/Mathematics/NumericalIntegrators/rungeKuttaVariableStepSizeIntegrator.h\"\n#include \"Tudat/InputOutput/matrixTextFileReader.h\"\n#include \"Tudat/Mathematics/NumericalIntegrators/numericalIntegrator.h\"\n#include \"Tudat/Mathematics/NumericalIntegrators/reinitializableNumericalIntegrator.h\"\n#include \"Tudat/Mathematics/NumericalIntegrators/rungeKuttaCoefficients.h\"\n#include \"Tudat/Mathematics/NumericalIntegrators/UnitTests/numericalIntegratorTests.h\"\n#include \"Tudat/Mathematics/NumericalIntegrators/UnitTests/numericalIntegratorTestFunctions.h\"\n\n#include \"Tudat/InputOutput/basicInputOutput.h\"\n#include \"Tudat/Mathematics/BasicMathematics/linearAlgebra.h\"\n\n#include \n#include \n\n#include \n\nnamespace tudat\n{\nnamespace unit_tests\n{\n\nBOOST_AUTO_TEST_SUITE( test_runge_kutta_87_dormand_and_prince_integrator )\n\nusing linear_algebra::flipMatrixRows;\n\nusing numerical_integrators::NumericalIntegratorXdPointer;\nusing numerical_integrators::ReinitializableNumericalIntegratorXdPointer;\nusing numerical_integrators::RungeKuttaVariableStepSizeIntegratorXd;\nusing numerical_integrators::RungeKuttaCoefficients;\n\nusing numerical_integrator_test_functions::computeNonAutonomousModelStateDerivative;\n\n//! 
Test Runge-Kutta 87 Dormand-Prince integrator using benchmark data from (The MathWorks, 2012).\nBOOST_AUTO_TEST_CASE( testRungeKutta87DormandAndPrinceIntegratorUsingMatlabData )\n{\n using namespace numerical_integrator_tests;\n\n // Read in benchmark data (generated using Symbolic Math Toolbox in Matlab\n // (The MathWorks, 2012)). This data is generated using the DOPRI78 numerical integrator.\n const std::string pathToForwardIntegrationOutputFile = input_output::getTudatRootPath( )\n + \"/Mathematics/NumericalIntegrators/UnitTests\"\n + \"/matlabOutputRungeKutta87DormandPrinceForward.txt\";\n const std::string pathToDiscreteEventIntegrationOutputFile = input_output::getTudatRootPath( )\n + \"/Mathematics/NumericalIntegrators/UnitTests\"\n + \"/matlabOutputRungeKutta87DormandPrinceDiscreteEvent.txt\";\n\n // Store benchmark data in matrix.\n const Eigen::MatrixXd matlabForwardIntegrationData =\n input_output::readMatrixFromFile( pathToForwardIntegrationOutputFile, \",\" );\n Eigen::MatrixXd matlabBackwardIntegrationData = matlabForwardIntegrationData;\n flipMatrixRows( matlabBackwardIntegrationData );\n const Eigen::MatrixXd matlabDiscreteEventIntegrationData =\n input_output::readMatrixFromFile( pathToDiscreteEventIntegrationOutputFile, \",\" );\n\n // Set integrator parameters.\n\n // All of the following parameters are set such that the input data is fully accepted by the\n // integrator, to determine the steps to be taken.\n const double zeroMinimumStepSize = std::numeric_limits< double >::epsilon( );\n const double infiniteMaximumStepSize = std::numeric_limits< double >::infinity( );\n const double infiniteRelativeErrorTolerance = std::numeric_limits< double >::infinity( );\n const double infiniteAbsoluteErrorTolerance = std::numeric_limits< double >::infinity( );\n\n // The following parameters set how the error control mechanism should work.\n const double relativeErrorTolerance = 1.0e-15;\n const double absoluteErrorTolerance = 1.0e-15;\n\n // Case 1: 
Execute integrateTo() to integrate one step forward in time.\n {\n // Declare integrator with all necessary settings.\n NumericalIntegratorXdPointer integrator\n = std::make_shared< RungeKuttaVariableStepSizeIntegratorXd >(\n RungeKuttaCoefficients::get( RungeKuttaCoefficients::rungeKutta87DormandPrince ),\n &computeNonAutonomousModelStateDerivative,\n matlabForwardIntegrationData( FIRST_ROW, TIME_COLUMN_INDEX ),\n ( Eigen::VectorXd( 1 )\n << matlabForwardIntegrationData( FIRST_ROW,\n STATE_COLUMN_INDEX ) ).finished( ),\n zeroMinimumStepSize,\n infiniteMaximumStepSize,\n infiniteRelativeErrorTolerance,\n infiniteAbsoluteErrorTolerance );\n\n executeOneIntegrateToStep( matlabForwardIntegrationData, 1.0e-15, integrator );\n }\n\n // Case 2: Execute performIntegrationStep() to perform multiple integration steps until final\n // time.\n {\n // Declare integrator with all necessary settings.\n NumericalIntegratorXdPointer integrator\n = std::make_shared< RungeKuttaVariableStepSizeIntegratorXd >(\n RungeKuttaCoefficients::get( RungeKuttaCoefficients::rungeKutta87DormandPrince ),\n &computeNonAutonomousModelStateDerivative,\n matlabForwardIntegrationData( FIRST_ROW, TIME_COLUMN_INDEX ),\n ( Eigen::VectorXd( 1 )\n << matlabForwardIntegrationData( FIRST_ROW,\n STATE_COLUMN_INDEX ) ).finished( ),\n zeroMinimumStepSize,\n infiniteMaximumStepSize,\n infiniteRelativeErrorTolerance,\n infiniteAbsoluteErrorTolerance );\n\n performIntegrationStepToSpecifiedTime( matlabForwardIntegrationData,\n 1.0e-15, 1.0e-15, integrator );\n }\n\n // Case 3: Execute performIntegrationStep() to perform multiple integration steps until initial\n // time (backwards).\n {\n // Declare integrator with all necessary settings.\n NumericalIntegratorXdPointer integrator\n = std::make_shared< RungeKuttaVariableStepSizeIntegratorXd >(\n RungeKuttaCoefficients::get( RungeKuttaCoefficients::rungeKutta87DormandPrince ),\n &computeNonAutonomousModelStateDerivative,\n matlabBackwardIntegrationData( FIRST_ROW, 
TIME_COLUMN_INDEX ),\n ( Eigen::VectorXd( 1 )\n << matlabBackwardIntegrationData( FIRST_ROW,\n STATE_COLUMN_INDEX ) ).finished( ),\n zeroMinimumStepSize,\n infiniteMaximumStepSize,\n infiniteRelativeErrorTolerance,\n infiniteAbsoluteErrorTolerance );\n\n performIntegrationStepToSpecifiedTime( matlabBackwardIntegrationData,\n 1.0e-15, 1.0e-14, integrator );\n }\n\n // Case 4: Execute integrateTo() to integrate to specified time in one step.\n {\n // Note that this test has a strange issue that the if the absolute error tolerance is set\n // to 1.0e-15, the last step that the integrateTo() function takes does not result in the\n // expected final time of 1.0. As a temporary solution, the absolute error tolerance has\n // been multiplied by 10.0, which seems to solve the problem. This error indicated a\n // possible problem with the implementation of the integrateTo() function, which needs to\n // be investigated in future.\n\n // Declare integrator with all necessary settings.\n NumericalIntegratorXdPointer integrator\n = std::make_shared< RungeKuttaVariableStepSizeIntegratorXd >(\n RungeKuttaCoefficients::get( RungeKuttaCoefficients::rungeKutta87DormandPrince ),\n &computeNonAutonomousModelStateDerivative,\n matlabForwardIntegrationData( FIRST_ROW, TIME_COLUMN_INDEX ),\n ( Eigen::VectorXd( 1 )\n << matlabForwardIntegrationData( FIRST_ROW,\n STATE_COLUMN_INDEX ) ).finished( ),\n zeroMinimumStepSize,\n infiniteMaximumStepSize,\n relativeErrorTolerance,\n absoluteErrorTolerance * 10.0 );\n\n executeIntegrateToToSpecifiedTime( matlabForwardIntegrationData, 1.0e-15, integrator,\n matlabForwardIntegrationData(\n matlabForwardIntegrationData.rows( ) - 1,\n TIME_COLUMN_INDEX ) );\n }\n\n // Case 5: Execute performIntegrationstep() to integrate to specified time in multiple steps,\n // including discrete events.\n {\n // Declare integrator with all necessary settings.\n ReinitializableNumericalIntegratorXdPointer integrator\n = std::make_shared< 
RungeKuttaVariableStepSizeIntegratorXd >(\n RungeKuttaCoefficients::get( RungeKuttaCoefficients::rungeKutta87DormandPrince ),\n &computeNonAutonomousModelStateDerivative,\n matlabForwardIntegrationData( FIRST_ROW, TIME_COLUMN_INDEX ),\n ( Eigen::VectorXd( 1 )\n << matlabForwardIntegrationData( FIRST_ROW,\n STATE_COLUMN_INDEX ) ).finished( ),\n zeroMinimumStepSize,\n infiniteMaximumStepSize,\n infiniteRelativeErrorTolerance,\n infiniteAbsoluteErrorTolerance );\n\n performIntegrationStepToSpecifiedTimeWithEvents( matlabDiscreteEventIntegrationData,\n 1.0e-15, 1.0e-12, integrator );\n }\n}\n\nBOOST_AUTO_TEST_SUITE_END( )\n\n} // namespace unit_tests\n} // namespace tudat\n", "meta": {"hexsha": "e46f11df1693159d791e2d310cc988198ec975d3", "size": 10825, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Tudat/Mathematics/NumericalIntegrators/UnitTests/unitTestRungeKutta87DormandPrinceIntegrator.cpp", "max_stars_repo_name": "J-Westin/tudat", "max_stars_repo_head_hexsha": "82ebe9e6e2dd51d0688b77960e62e980e6b8bcb8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Tudat/Mathematics/NumericalIntegrators/UnitTests/unitTestRungeKutta87DormandPrinceIntegrator.cpp", "max_issues_repo_name": "J-Westin/tudat", "max_issues_repo_head_hexsha": "82ebe9e6e2dd51d0688b77960e62e980e6b8bcb8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tudat/Mathematics/NumericalIntegrators/UnitTests/unitTestRungeKutta87DormandPrinceIntegrator.cpp", "max_forks_repo_name": "J-Westin/tudat", "max_forks_repo_head_hexsha": "82ebe9e6e2dd51d0688b77960e62e980e6b8bcb8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, 
"max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.5841121495, "max_line_length": 101, "alphanum_fraction": 0.6686374134, "num_tokens": 2258, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7577943712746406, "lm_q2_score": 0.6584175072643413, "lm_q1q2_score": 0.49894508095359763}} {"text": "//==================================================================================================\n/**\n Copyright 2016 NumScale SAS\n\n Distributed under the Boost Software License, Version 1.0.\n (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)\n**/\n//==================================================================================================\n#include \n#include \n#include \n#include \n\nnamespace bs = boost::simd;\nnamespace bd = boost::dispatch;\n\nSTF_CASE_TPL(\"Check basic behavior of ifrexp\", STF_IEEE_TYPES)\n{\n STF_EXPR_IS ( (bs::ifrexp(T(0)))\n , (std::pair>)\n );\n\n auto p = bs::ifrexp(T(1));\n STF_EQUAL(p.first , T(0.5));\n STF_EQUAL(p.second , T(1));\n}\n\nSTF_CASE_TPL(\"Check behavior of ifrexp on Zero\", STF_IEEE_TYPES)\n{\n auto r = bs::ifrexp(T(0));\n\n STF_EQUAL (r.first , T(0));\n STF_EQUAL (r.second, T(0));\n STF_EQUAL (ldexp(r.first,r.second), T(0));\n}\n\nSTF_CASE_TPL(\"Check behavior of ifrexp on Valmax\", STF_IEEE_TYPES)\n{\n auto r = bs::ifrexp(bs::Valmax());\n\n STF_ULP_EQUAL (r.first , T(1)-bs::Halfeps(), 1);\n STF_EQUAL (r.second, bs::Limitexponent());\n STF_EQUAL (ldexp(r.first,r.second),bs::Valmax());\n}\n", "meta": {"hexsha": "b1facdaab98c476b04607e5c28a05e3c6c419e5a", "size": 1359, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/function/scalar/ifrexp.regular.cpp", "max_stars_repo_name": "TobiasLudwig/boost.simd", "max_stars_repo_head_hexsha": "c04d0cc56747188ddb9a128ccb5715dd3608dbc1", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2018-02-25T22:23:33.000Z", 
"max_stars_repo_stars_event_max_datetime": "2021-01-15T15:13:12.000Z", "max_issues_repo_path": "test/function/scalar/ifrexp.regular.cpp", "max_issues_repo_name": "remymuller/boost.simd", "max_issues_repo_head_hexsha": "3caefb7ee707e5f68dae94f8f31f72f34b7bb5de", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/function/scalar/ifrexp.regular.cpp", "max_forks_repo_name": "remymuller/boost.simd", "max_forks_repo_head_hexsha": "3caefb7ee707e5f68dae94f8f31f72f34b7bb5de", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-12-12T12:36:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-10T14:27:07.000Z", "avg_line_length": 30.2, "max_line_length": 100, "alphanum_fraction": 0.565857248, "num_tokens": 379, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.757794360334681, "lm_q2_score": 0.6584175139669997, "lm_q1q2_score": 0.49894507882977335}} {"text": "// In KAI C++ 3.2c, restrict causes problems for copy propagation.\n// Temporary kludge is to disable use of the restrict keyword.\n\n#define BZ_DISABLE_RESTRICT\n\n#include \n#include \n#include \n#include \n\n#ifdef BENCHMARK_VALARRAY\n#include \n#endif\n\nBZ_USING_NAMESPACE(blitz)\n\n#ifdef BZ_FORTRAN_SYMBOLS_WITH_TRAILING_UNDERSCORES\n #define fdaxpy fdaxpy_\n #define f90daxpy f90daxpy_\n #define fidaxpy fidaxpy_\n #define fidaxpyo fidaxpyo_\n#endif\n\n#ifdef BZ_FORTRAN_SYMBOLS_CAPS\n #define fdaxpy FDAXPY\n #define f90daxpy F90DAXPY\n #define fidaxpy FIDAXPY\n #define fidaxpyo FIDAXPYO\n#endif\n\nextern \"C\" {\n void fdaxpy(const int& N, const double& da, double* x,\n const int& xstride, const double* y, const int& ystride);\n\n void f90daxpy(const double& a, double* x, \n const double* y, const int& length, const int& iters);\n\n void fidaxpy(const double& a, double* x, const 
double* y,\n const int& length, const int& iters);\n\n void fidaxpyo(const double& a, double* x, const double* y,\n const int& length, const int& iters);\n}\n\nvoid daxpyVectorVersion(BenchmarkExt& bench, double a, double b);\nvoid daxpyArrayVersion(BenchmarkExt& bench, double a);\nvoid daxpyF77Version(BenchmarkExt& bench, double a);\nvoid daxpyBLASVersion(BenchmarkExt& bench, double a);\nvoid daxpyF90Version(BenchmarkExt& bench, double a);\n\n#ifdef BENCHMARK_VALARRAY\nvoid daxpyValarrayVersion(BenchmarkExt& bench, double a);\n#endif\n\nint main()\n{\n\n#ifdef BENCHMARK_VALARRAY\n int numBenchmarks = 6;\n#else\n int numBenchmarks = 5;\n#endif\n\n BenchmarkExt bench(\"DAXPY Benchmark\", numBenchmarks);\n\n const int numSizes = 19;\n bench.setNumParameters(numSizes);\n bench.setRateDescription(\"Mflops/s\");\n\n Vector parameters(numSizes);\n Vector iters(numSizes);\n Vector flops(numSizes);\n\n for (int i=0; i < numSizes; ++i)\n {\n parameters[i] = pow(10.0, (i+1)/4.0);\n iters[i] = 50000000L / parameters[i];\n if (iters[i] < 2)\n iters[i] = 2;\n flops[i] = 2 * parameters[i] * 2;\n }\n\n bench.setParameterVector(parameters);\n bench.setIterations(iters);\n bench.setOpsPerIteration(flops);\n\n bench.beginBenchmarking();\n\n float a = .398498293819823;\n\n daxpyVectorVersion(bench, a, -a);\n daxpyArrayVersion(bench, a);\n daxpyF77Version(bench, a);\n daxpyBLASVersion(bench, a);\n daxpyF90Version(bench, a);\n\n#ifdef BENCHMARK_VALARRAY\n daxpyValarrayVersion(bench, a);\n#endif\n\n bench.endBenchmarking();\n\n bench.saveMatlabGraph(\"daxpy2.m\");\n\n return 0;\n}\n\nvoid initializeRandomDouble(double* data, int numElements, int stride = 1)\n{\n static Random rnd;\n\n for (int i=0; i < numElements; ++i)\n data[i*stride] = rnd.random();\n}\n\ntemplate\nvoid initializeArray(T& array, int numElements)\n{\n static Random rnd;\n\n for (size_t i=0; i < numElements; ++i)\n array[i] = rnd.random();\n}\n\nvoid daxpyVectorVersion(BenchmarkExt& bench, double a, double b)\n{\n 
bench.beginImplementation(\"Vector\");\n\n while (!bench.doneImplementationBenchmark())\n {\n int N = bench.getParameter();\n\n cout << \"Vector: N = \" << N << endl;\n cout.flush();\n\n long iters = bench.getIterations();\n\n Vector x(N), y(N);\n initializeRandomDouble(x.data(), N);\n initializeRandomDouble(y.data(), N);\n\n bench.start();\n for (long i=0; i < iters; ++i)\n { \n y += a * x;\n y += b * x;\n }\n bench.stop();\n }\n\n bench.endImplementation();\n}\n\n\nvoid daxpyArrayVersion(BenchmarkExt& bench, double a)\n{\n bench.beginImplementation(\"Array\");\n\n while (!bench.doneImplementationBenchmark())\n {\n int N = bench.getParameter();\n\n cout << \"Array: N = \" << N << endl;\n cout.flush();\n\n long iters = bench.getIterations();\n\n Array x(N), y(N);\n initializeRandomDouble(x.data(), N);\n initializeRandomDouble(y.data(), N);\n\n double b = - a;\n\n bench.start();\n for (long i=0; i < iters; ++i)\n {\n y += a * x;\n y += b * x;\n }\n bench.stop();\n }\n\n bench.endImplementation();\n}\n\nvoid daxpyF77Version(BenchmarkExt& bench, double a)\n{\n bench.beginImplementation(\"Fortran 77\");\n\n while (!bench.doneImplementationBenchmark())\n {\n int N = bench.getParameter();\n\n cout << \"Fortran 77: N = \" << N << endl;\n cout.flush();\n\n int iters = bench.getIterations();\n\n double* x = new double[N];\n double* y = new double[N];\n initializeRandomDouble(x, N);\n initializeRandomDouble(y, N);\n\n bench.start();\n fidaxpy(a, x, y, N, iters);\n bench.stop();\n\n delete [] x;\n delete [] y;\n }\n\n bench.endImplementation();\n}\n\n\nvoid daxpyBLASVersion(BenchmarkExt& bench, double a)\n{\n bench.beginImplementation(\"Fortran BLAS\");\n\n while (!bench.doneImplementationBenchmark())\n {\n int N = bench.getParameter();\n\n cout << \"Fortran BLAS: N = \" << N << endl;\n cout.flush();\n\n int iters = bench.getIterations();\n\n double* x = new double[N];\n double* y = new double[N];\n initializeRandomDouble(x, N);\n initializeRandomDouble(y, N);\n\n int xstride 
= 1, ystride = 1;\n double b = - a;\n\n bench.start();\n for (long i=0; i < iters; ++i)\n {\n fdaxpy(N, a, x, xstride, y, ystride);\n fdaxpy(N, b, x, xstride, y, ystride);\n }\n bench.stop();\n\n delete [] x;\n delete [] y;\n }\n\n bench.endImplementation();\n}\n\nvoid daxpyF90Version(BenchmarkExt& bench, double a)\n{\n bench.beginImplementation(\"Fortran 90\");\n\n while (!bench.doneImplementationBenchmark())\n {\n int N = bench.getParameter();\n\n cout << \"Fortran 90: N = \" << N << endl;\n cout.flush();\n\n int iters = bench.getIterations();\n\n double* x = new double[N];\n double* y = new double[N];\n initializeRandomDouble(x, N);\n initializeRandomDouble(y, N);\n\n bench.start();\n f90daxpy(a, x, y, N, iters);\n bench.stop();\n\n delete [] x;\n delete [] y;\n }\n\n bench.endImplementation();\n}\n\n#ifdef BENCHMARK_VALARRAY\nvoid daxpyValarrayVersion(BenchmarkExt& bench, double a)\n{\n bench.beginImplementation(\"valarray\");\n\n while (!bench.doneImplementationBenchmark())\n {\n int N = bench.getParameter();\n\n cout << \"valarray: N = \" << N << endl;\n cout.flush();\n\n long iters = bench.getIterations();\n\n valarray x(N), y(N);\n initializeArray(x, N);\n initializeArray(y, N);\n\n double b = - a;\n\n bench.start();\n for (long i=0; i < iters; ++i)\n {\n y += a * x;\n y += b * x;\n }\n bench.stop();\n }\n\n bench.endImplementation();\n}\n#endif\n", "meta": {"hexsha": "a84670420474e1519fa3d7c6b4a27b2ba31e3afb", "size": 7074, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "depspawn-blitz-0.10/benchmarks/daxpy2.cpp", "max_stars_repo_name": "fraguela/depspawn", "max_stars_repo_head_hexsha": "b5760f4c0d38a1b245ee5274e2ccc5c5fe2d3d45", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2017-04-12T11:05:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T11:10:27.000Z", "max_issues_repo_path": "ibtk/third_party/blitz-0.10/benchmarks/daxpy2.cpp", "max_issues_repo_name": "MSV-Project/IBAMR", 
"max_issues_repo_head_hexsha": "3cf614c31bb3c94e2620f165ba967cba719c45ea", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ibtk/third_party/blitz-0.10/benchmarks/daxpy2.cpp", "max_forks_repo_name": "MSV-Project/IBAMR", "max_forks_repo_head_hexsha": "3cf614c31bb3c94e2620f165ba967cba719c45ea", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.6006389776, "max_line_length": 74, "alphanum_fraction": 0.5976816511, "num_tokens": 1896, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7577943712746406, "lm_q2_score": 0.658417500561683, "lm_q1q2_score": 0.4989450758743609}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"otherheaders.h\"\n\n/// TODO: @Yupeng added\n#include \n#include \n#include \n\n#include \n#include \n#include \n\n// using namespace Eigen;\ntypedef Eigen::SparseMatrix SpMat;\ntypedef Eigen::Triplet T;\n\nusing namespace cv;\nusing namespace std;\n\n/* sort pair based on first value */\ntypedef std::pair mypair;\nbool comparatorPair(const mypair& l, const mypair& r) {\n return l.first < r.first;\n}\n\nvoid spectralPb(std::vector& sPb, Mat* mPb, Size orig_size, std::string outFile, int nvec) {\n int ty = mPb->rows;\n int tx = mPb->cols;\n /*\n Mat A1 = mPb->t();\n int ty = A1.rows;\n int tx = A1.cols;\n */\n\n /*\n cout << \"========Printing mPb ============\" <rows; i++) {\n ptrData = (double*) mPb->ptr(i);\n for (int j = 0; j < mPb->cols; j++) {\n cout << ptrData[j] << \",\";\n }\n cout << endl;\n }\n cout << \"========Ended Printing mPb ============\" <copyTo(l1(Rect(0, 1, tx, ty)));\n mPb->copyTo(l2(Rect(1, 0, tx, ty)));\n\n log_info(\"Setting the sparse symmetric affinity 
matrix\");\n vector val;\n vector I; /// FIXME: long int or long long int??\n vector J; /// FIXME: long int or long long int??\n vector valD;\n\n time_t now1, now2, now3, now4, now5, now5a, now5b, now6, now7, now8, now9, now10, now11;\n time(&now1);\n buildW(valD, val, I, J, l1, l2); /// FIXME: check the output order of val I and J\n time(&now2);\n\n log_info(\"At the end of buildW: %ld\", now2 - now1);\n\n /// TODO: @Yupeng uncomment @Manu's display codes\n /*\n cout << \"=====Display matrix =======\" << endl;\n for (int i = 0 ;i < val.size(); i++) {\n cout << \"(\"< pcol(n + 1, -1);\n int colInd = 0;\n pcol.at(colInd) = 0;\n for (int i = 1; i < nnz; i++) {\n if (I.at(i) != I.at(i - 1)) pcol.at(++colInd) = i;\n }\n pcol.at(++colInd) = nnz;\n\n time(&now5a);\n\n log_debug(\"Time pcol: %ld\", now5a - now4);\n\n /// FIXME: part below commented because of more efficient version above\n /*\n vector pcol_old(n + 1, -1);\n std::vector::iterator it;\n for (int i = 0; i < n; i++) {\n fs << \"orig_size\" << orig_size;\n it = find(I.begin(), I.end(), i);\n if (it != I.end()) {\n pcol_old.at(i) = it - I.begin();\n }\n }\n pcol_old.at(n) = nnz;\n */\n\n time(&now5);\n log_debug(\"Time pcol_old: %ld\", now5 - now5a);\n\n time(&now5b);\n log_debug(\"Comparison time: %ld\", now5b - now5);\n\n int nnzD = n;\n std::vector valDmW(nnz);\n for (int i = 0; i < nnz; i++) {\n if (I.at(i) != J.at(i)) {\n valDmW.at(i) = -val.at(i);\n }\n else {\n /// FIXME: if any element changes to zero\n valDmW.at(i) = valD.at(I.at(i)) - val.at(i);\n }\n }\n\n time(&now6);\n log_info(\"Time pcol: %ld\", now6 - now5);\n std::vector IB;\n IB.reserve(n);\n int cnt = 0;\n std::generate_n(std::back_inserter(IB), n, [cnt]() mutable { return cnt++; });\n\n time(&now7);\n log_info(\"Time generate_n: %ld\", now7 - now6);\n // cout << IB.at(0) << \",\" << IB.at(n - 1) << \",\" << nnzD << endl;\n\n std::vector JB(IB);\n JB.push_back(nnzD);\n\n std::vector valB(nnzD);\n for (int i = 0; i < nnzD; i++) {\n valB.at(i) 
= 0;\n if (pcol.at(i) != -1)\n for (int j = pcol[i]; j < pcol[i + 1]; j++) {\n valB.at(i) += val.at(j);\n }\n }\n\n /*\n cout << \"=====Display matrix =======\" << endl;\n for (int i = 0; i < valD.size(); i++) {\n cout << \"(\"< D2mW2(nD, nD);\n std::vector D2mW2_tripletList;\n D2mW2_tripletList.reserve(nnz);\n for (int i = 0; i < nnz; i++) {\n /* I is rowIdx, J is colIdx, condition \"tempCol >= row\" for push_back of I, J, val */\n D2mW2_tripletList.push_back(T(J.at(i), I.at(i), valDmW.at(i)));\n }\n D2mW2.setFromTriplets(D2mW2_tripletList.begin(), D2mW2_tripletList.end());\n D2mW2.makeCompressed();\n\n Eigen::SparseMatrix D2(nD, nD);\n std::vector D2_tripletList;\n D2_tripletList.reserve(nD);\n for (int i = 0; i < nD; i++) {\n D2_tripletList.push_back(T(i, i, valD.at(i)));\n }\n D2.setFromTriplets(D2_tripletList.begin(), D2_tripletList.end());\n D2.makeCompressed();\n\n Spectra::SparseGenMatProd opA(D2mW2);\n Spectra::SparseCholesky opB(D2);\n\n /// FIXME: @Yupeng maxLabel is something got after watershed\n // int ncv = (maxLabel < 2*nvec) ? 
maxLabel : 2*nvec;\n log_debug(\"Spectra nvec: %d\", nvec);\n int ncv = 2 * nvec; /* nev < ncv <= n (size of matrix) */\n\n Spectra::SymGEigsSolver,\n Spectra::SparseCholesky, Spectra::GEIGS_CHOLESKY>\n geigs(&opA, &opB, nvec, ncv);\n\n geigs.init();\n int nconv = geigs.compute();\n\n Eigen::VectorXd evalues;\n Eigen::MatrixXd evecs;\n if (geigs.info() == Spectra::SUCCESSFUL) {\n evalues = geigs.eigenvalues(); /* nvec */\n evecs = geigs.eigenvectors(); /* (rows, cols) = (nD, nvec) */\n }\n else {\n log_error(\"Geigs is failing\");\n }\n log_debug(\"Rows: %ld, Cols: %ld\", evecs.rows(), evecs.cols());\n stringstream sStream;\n sStream << evalues;\n log_debug(\"Generalized eigenvalues found: %s\", sStream.str().c_str());\n sStream.str(\"\");\n sStream << evecs.topRows(10);\n log_debug(\"Generalized eigenvectors found: %s\", sStream.str().c_str());\n\n log_debug(\"n: %d\", n);\n log_debug(\"Rows: %ld, Cols: %ld\", evecs.rows(), evecs.cols());\n log_debug(\"VectorXd size: %ld\", evalues.size());\n\n /* data interface conversion */\n p_eigVals = new double[nvec];\n for (int i = 0; i < nvec; i++) p_eigVals[i] = evalues(i);\n p_eigVec = new double[n * nvec]; /// TODO: not sure about the conflict between n and nD\n for (int i = 0; i < nvec; i++) {\n for (int j = 0; j < nD; j++) {\n p_eigVec[i * nD + j] = evecs(j, i);\n }\n }\n\n /* eigenvector computation using Spectra library ends */\n\n std::vector vect(nvec);\n vect.at(0) = new Mat(ty, tx, CV_64FC1, Scalar::all(0));\n\n double* data;\n double minVal, maxVal, minXi, maxXi;\n Point minLoc, maxLoc;\n std::string imageName;\n double alpha, beta;\n Mat destIm(ty, tx, CV_64FC1);\n for (int i = 1; i < nvec; i++) { /* excluding i = 0 */\n vect.at(i) = new Mat(ty, tx, CV_64FC1);\n data = (double*)vect.at(i)->data;\n for (int j = 0; j < n; j++) {\n data[j] = p_eigVec[i * n + j];\n }\n minMaxLoc(*vect.at(i), &minVal, &maxVal, &minLoc, &maxLoc);\n *(vect.at(i)) -= minVal;\n *(vect.at(i)) /= maxVal - minVal; /// FIXME: check order 
of precedence\n minMaxLoc(*vect.at(i), &minVal, &maxVal, &minLoc, &maxLoc);\n }\n\n /* OE parameters */\n int hil = 0;\n int deriv = 1;\n int support = 3;\n double sigma = 1.0;\n int nOrient = 8;\n double dtheta = PI / nOrient;\n vector ch_per = {3, 2, 1, 0, 7, 6, 5, 4};\n double theta = 0.0;\n sPb.resize(nOrient);\n /* initilaize sPb */\n for (int i = 0; i < nOrient; i++) {\n sPb.at(i) = new Mat(ty, tx, CV_64FC1, Scalar::all(0));\n }\n\n log_info(\"Filtering the sPb values\");\n\n for (int i = 1; i < nvec; i++) {\n if (p_eigVals[i] >\n /// TODO: check what happens for zero of smallest eigenvalue\n std::numeric_limits::epsilon()) {\n log_debug(\"Eigenvalue %d: %f\", i, p_eigVals[i]);\n *(vect.at(i)) /= sqrt(p_eigVals[i]);\n for (int o = 0; o < nOrient; o++) {\n double theta = dtheta * static_cast(o);\n /// FIXME: hil not handled and also not required\n Mat f = oeFilter(sigma, support, theta, deriv, hil);\n Mat destIm;\n filter2D(*vect.at(i), destIm, -1, f, Point(-1, -1), 0, BORDER_REFLECT_101);\n *(sPb.at(ch_per.at(o))) += abs(destIm);\n }\n }\n }\n\n /// FIXME: @Yupeng comment\n /*\n double alpha, beta;\n for (int i = 0; i < nOrient; i++) {\n // cout << \"here_seg\" << endl;\n imageName = outFile + \"_sPb_i_\" + to_string(i) + \".png\";\n minMaxLoc(*sPb.at(i), &minXi, &maxXi);\n alpha = 255.0 / (maxXi - minXi);\n beta = -minXi * 255.0 / (maxXi - minXi);\n Mat destIm;\n sPb.at(i)->convertTo(destIm, CV_8U, alpha, beta);\n imwrite(imageName, destIm);\n }\n cout << \"here seg 1\" << endl;\n */\n\n /// FIXME: @Yupeng potential memory leak problems\n delete[] p_eigVals;\n delete[] p_eigVec;\n}\n", "meta": {"hexsha": "d9356e7ec42b973d830693340c12bf462a7bf31c", "size": 10960, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/spectralPb.cpp", "max_stars_repo_name": "yanyp/UCM-MPI-GPU", "max_stars_repo_head_hexsha": "24f6794820c0efd4e99337030b5051994bea9223", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7.0, 
"max_stars_repo_stars_event_min_datetime": "2018-05-05T03:51:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-30T01:05:33.000Z", "max_issues_repo_path": "src/spectralPb.cpp", "max_issues_repo_name": "yanyp/UCM-MPI-GPU", "max_issues_repo_head_hexsha": "24f6794820c0efd4e99337030b5051994bea9223", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-08-13T08:49:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-13T08:49:05.000Z", "max_forks_repo_path": "src/spectralPb.cpp", "max_forks_repo_name": "yanyp/UCM-MPI-GPU", "max_forks_repo_head_hexsha": "24f6794820c0efd4e99337030b5051994bea9223", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8604651163, "max_line_length": 104, "alphanum_fraction": 0.5373175182, "num_tokens": 3578, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7577943712746407, "lm_q2_score": 0.6584175005616829, "lm_q1q2_score": 0.4989450758743609}} {"text": "#ifndef UTIL_HPP\n#define UTIL_HPP\n\n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n\n// Google Log hack\n#define LR LOG(INFO)<<\"(rank:\"<rank<<\") \"\n\n// Container hack\n#define RANGE(x) ((x).begin()), ((x).end())\n#define SUM(x) (std::accumulate(RANGE(x), .0))\n\n// Random hack\n#include \n#include \n#define CLOCK (std::chrono::system_clock::now().time_since_epoch().count())\nstatic std::mt19937 _rng(CLOCK);\nstatic std::uniform_real_distribution _unif01;\nstatic std::normal_distribution _stdnormal;\n\nstruct ThreadRNG {\n ThreadRNG() : rng_(CLOCK) {}\n std::mt19937 rng_;\n std::uniform_real_distribution unif01_;\n std::normal_distribution stdnormal_;\n};\n\n// Eigen\n#define EIGEN_INITIALIZE_MATRICES_BY_ZERO\n#define EIGEN_DEFAULT_IO_FORMAT \\\n Eigen::IOFormat(StreamPrecision,1,\" \",\" \",\"\",\"\",\"[\",\"]\")\n#include \nusing EMatrix = Eigen::MatrixXd;\nusing EVector = Eigen::VectorXd;\nusing EArray = Eigen::ArrayXd;\nusing EMAtrix = Eigen::ArrayXXd;\nusing EMatrixMap = Eigen::Map;\nusing EVectorMap = Eigen::Map;\nusing EArrayMap = Eigen::Map;\n\ntemplate\nusing TMatrix = Eigen::Matrix;\n\ntemplate\nstatic void save_matrix(const TMatrix& mat, std::string file) {\n std::ofstream fout(file); CHECK(fout.good());\n Eigen::IOFormat space_sep(Eigen::StreamPrecision,0,\" \",\"\\n\",\"\",\"\",\"\",\"\");\n fout << mat.format(space_sep);\n fout.close();\n}\n\ntemplate\nstatic void load_matrix(TMatrix *mat, std::string file) {\n std::ifstream fin(file); CHECK(fin.good());\n int nrows = 0;\n std::string line;\n while (std::getline(fin, line)) ++nrows;\n fin.clear(); fin.seekg(0); // rewind\n T val;\n std::vector flat; // row major\n while (not fin.eof()) {\n fin >> val;\n flat.push_back(val);\n }\n fin.close();\n mat->resize(flat.size() / nrows, nrows);\n memcpy(mat->data(), flat.data(), sizeof(double) * 
flat.size());\n mat->transposeInPlace();\n}\n\ntemplate\nstatic void rowwise_max_ind(const TMatrix& mat, TMatrix *ind) {\n int nrows = mat.rows();\n ind->resize(nrows, 1);\n for (int i = 0; i < nrows; ++i) {\n mat.row(i).maxCoeff(ind->data() + i);\n }\n}\n\n// Multivariate Normal with mean = covariance * v; covariance = inv(precision)\nstatic EVector draw_mvgaussian(const EMatrix& precision, const EVector& v) {\n Eigen::LLT chol;\n chol.compute(precision);\n EVector alpha = chol.matrixL().solve(v); // alpha = L' * mu\n EVector mu = chol.matrixU().solve(alpha);\n EVector z(v.size());\n for (int i = 0; i < v.size(); ++i)\n z(i) = _stdnormal(_rng);\n EVector x = chol.matrixU().solve(z);\n return mu + x;\n}\n\n// Get monotonic time in seconds from a starting point\nstatic double get_time() {\n struct timespec start;\n clock_gettime(CLOCK_MONOTONIC, &start);\n return (start.tv_sec + start.tv_nsec/1000000000.0);\n}\n\nclass Timer {\npublic:\n void tic() { start_ = get_time(); }\n double toc() { double ret = get_time() - start_; time_ += ret; return ret; }\n double get() { return time_; }\nprivate:\n double time_ = .0;\n double start_ = get_time();\n};\n\n// Google flags hack\nstatic void print_help() {\n fprintf(stderr, \"Program Flags:\\n\");\n std::vector all_flags;\n google::GetAllFlags(&all_flags);\n for (const auto& flag : all_flags) {\n if (flag.filename.find(\"src/\") != 0) // HACK: filter out built-in flags\n fprintf(stderr,\n \"-%s: %s (%s, default:%s)\\n\",\n flag.name.c_str(),\n flag.description.c_str(),\n flag.type.c_str(),\n flag.default_value.c_str());\n }\n exit(1);\n}\n\n// Google flags hack\nstatic void print_flags() {\n LOG(INFO) << \"---------------------------------------------------------------------\";\n std::vector all_flags;\n google::GetAllFlags(&all_flags);\n for (const auto& flag : all_flags) {\n if (flag.filename.find(\"src/\") != 0) // HACK: filter out built-in flags\n LOG(INFO) << flag.name << \": \" << flag.current_value;\n }\n LOG(INFO) << 
\"---------------------------------------------------------------------\";\n}\n\n// Faster strtol without error checking.\nstatic long int strtol(const char *nptr, char **endptr) {\n // Skip spaces\n while (isspace(*nptr)) ++nptr;\n // Sign\n bool is_negative = false;\n if (*nptr == '-') {\n is_negative = true;\n ++nptr;\n } else if (*nptr == '+') {\n ++nptr;\n }\n // Go!\n long int res = 0;\n while (isdigit(*nptr)) {\n res = (res * 10) + (*nptr - '0');\n ++nptr;\n }\n if (endptr != NULL) *endptr = (char *)nptr;\n if (is_negative) return -res;\n return res;\n}\n\n#endif\n", "meta": {"hexsha": "07610662087466719542bf745dc889692ae5c555", "size": 4759, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/strads/apps/medlda_release/util.hpp", "max_stars_repo_name": "daiwei89/wdai_petuum_public", "max_stars_repo_head_hexsha": "4068859897061201d0a63630a3da6011b0d0f75f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 18.0, "max_stars_repo_stars_event_min_datetime": "2015-07-10T04:23:33.000Z", "max_stars_repo_stars_event_max_datetime": "2017-01-18T09:02:13.000Z", "max_issues_repo_path": "src/strads/apps/medlda_release/util.hpp", "max_issues_repo_name": "daiwei89/wdai_petuum_public", "max_issues_repo_head_hexsha": "4068859897061201d0a63630a3da6011b0d0f75f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2015-10-09T19:51:57.000Z", "max_issues_repo_issues_event_max_datetime": "2015-10-10T10:09:43.000Z", "max_forks_repo_path": "src/strads/apps/medlda_release/util.hpp", "max_forks_repo_name": "daiwei89/wdai_petuum_public", "max_forks_repo_head_hexsha": "4068859897061201d0a63630a3da6011b0d0f75f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 17.0, "max_forks_repo_forks_event_min_datetime": "2017-02-17T14:40:51.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-07T06:48:26.000Z", "avg_line_length": 27.9941176471, "max_line_length": 87, "alphanum_fraction": 
0.6257617146, "num_tokens": 1316, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7577943712746406, "lm_q2_score": 0.658417487156366, "lm_q1q2_score": 0.49894506571588715}} {"text": "/**\n * vector_mutator_test.cc\n * Copyright 2016 John Lawson\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n * \n * http://www.apache.org/licenses/LICENSE-2.0\n * \n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n#include \"vector_mutator.h\"\n\n#include \"gtest/gtest.h\"\n#include \"qv/quiver_matrix.h\"\n\n#include \n\nnamespace refl {\nnamespace {\nusing Matrix = arma::Mat;\n}\nTEST(VecMut, A2) {\n\tMatrix a = { { 2, 1 }, { 1, 2 } };\n\tcluster::QuiverMatrix q(\"{ { 0 1 } { -1 0 } }\");\n\tVectorMutator vm(q, a);\n\tarma::mat vecs = { { 0, 1 }, { 1, 0 } };\n\tarma::mat res(2, 2);\n\tvm.mutate(vecs, 0, res);\n\tarma::mat exp = { { 0, 1 }, { 1, 0 } };\n\tEXPECT_TRUE(arma::all(arma::all(exp == res)));\n\n\tarma::mat res2(2, 2);\n\tvm.mutate(vecs, 1, res2);\n\tarma::mat exp2 = { { -1, 1 }, { 1, 0 } };\n\tEXPECT_TRUE(arma::all(arma::all(exp2 == res2)));\n}\nTEST(VecMut, A3) {\n\tMatrix a = { { 2, 1, -1 }, { 1, 2, 1 }, { -1, 1, 2 } };\n\tcluster::QuiverMatrix q(\"{ { 0 1 0 } { -1 0 1 } { 0 -1 0 } }\");\n\tVectorMutator vm(q, a);\n\tarma::mat vecs = { { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 } };\n\tarma::mat res(3, 3);\n\tvm.mutate(vecs, 0, res);\n\tarma::mat exp = vecs;\n\tEXPECT_TRUE(arma::all(arma::all(exp == res)));\n\n\tarma::mat res2(3, 3);\n\tvm.mutate(vecs, 1, res2);\n\tarma::mat exp2 = { { 1, 0, 0 }, { -1, 1, 0 }, { 0, 0, 1 } 
};\n\tEXPECT_TRUE(arma::all(arma::all(exp2 == res2)));\n\n\tarma::mat res3(3, 3);\n\tvm.mutate(vecs, 2, res3);\n\tarma::mat exp3 = { { 1, 0, 0 }, { 0, 1, 0 }, { 0, -1, 1 } };\n\tEXPECT_TRUE(arma::all(arma::all(exp2 == res2)));\n}\nTEST(VecMut, A3signs) {\n\tMatrix a = { { 2, -1, -1 }, { -1, 2, -1 }, { -1, -1, 2 } };\n\tcluster::QuiverMatrix q(\"{ { 0 1 0 } { -1 0 1 } { 0 -1 0 } }\");\n\tVectorMutator vm(q, a);\n\tarma::mat vecs = { { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 } };\n\tarma::mat res(3, 3);\n\tvm.mutate(vecs, 0, res);\n\tarma::mat exp = vecs;\n\tEXPECT_TRUE(arma::all(arma::all(exp == res)));\n\n\tarma::mat res2(3, 3);\n\tvm.mutate(vecs, 1, res2);\n\tarma::mat exp2 = { { 1, 0, 0 }, { 1, 1, 0 }, { 0, 0, 1 } };\n\tEXPECT_TRUE(arma::all(arma::all(exp2 == res2)));\n\n\tarma::mat res3(3, 3);\n\tvm.mutate(vecs, 2, res3);\n\tarma::mat exp3 = { { 1, 0, 0 }, { 0, 1, 0 }, { 0, 1, 1 } };\n\tEXPECT_TRUE(arma::all(arma::all(exp2 == res2)));\n}\n}\n", "meta": {"hexsha": "d8d5623c5ef29504cb1f67a1cccf5d82d39a1f69", "size": 2605, "ext": "cc", "lang": "C++", "max_stars_repo_path": "test/vector_mutator_test.cc", "max_stars_repo_name": "jwlawson/qvrefl", "max_stars_repo_head_hexsha": "e843c48837949c5bb76d66959530e7cd55fec0ec", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/vector_mutator_test.cc", "max_issues_repo_name": "jwlawson/qvrefl", "max_issues_repo_head_hexsha": "e843c48837949c5bb76d66959530e7cd55fec0ec", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/vector_mutator_test.cc", "max_forks_repo_name": "jwlawson/qvrefl", "max_forks_repo_head_hexsha": "e843c48837949c5bb76d66959530e7cd55fec0ec", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, 
"max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0119047619, "max_line_length": 75, "alphanum_fraction": 0.5692898273, "num_tokens": 1049, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.757794360334681, "lm_q2_score": 0.658417487156366, "lm_q1q2_score": 0.49894505851282644}} {"text": "//\n// Created by kellerberrin on 19/4/21.\n//\n\n\n#include \"kol_OntologyTypes.h\"\n#include \"kol_InformationAncestorMean.h\"\n\n#include \"kol_SetUtilities.h\"\n#include \"kol_Accumulators.h\"\n\n\n#include \n\n\nnamespace kol = kellerberrin::ontology;\n\n\n//! A method for calculating the shared infromation between two concepts.\n/*!\n This method returns the shared information between two concepts.\n*/\ndouble kol::InformationAncestorMean::sharedInformation(const std::string &termA, const std::string &termB) const {\n // return 0 for any terms not in the datbase\n if (not ic_map_ptr_->validateTerms(termA, termB)) {\n\n return 0.0;\n\n }\n\n Accumulators::MeanAccumulator meanIC;\n\n OntologySetType ancestorsA = graph_ptr_->getSelfAncestorTerms(termA);\n OntologySetType ancestorsB = graph_ptr_->getSelfAncestorTerms(termB);\n\n OntologySetType sharedAncestors = SetUtilities::setIntersection(ancestorsA, ancestorsB);\n\n for (auto const &term : sharedAncestors) {\n\n meanIC(ic_map_ptr_->termInformation(term));\n\n }\n\n return Accumulators::extractMean(meanIC);\n\n}\n\n", "meta": {"hexsha": "0309ebaf1ee215ef3e2504f7621fc227ce36b4b8", "size": 1128, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "kol_ontology/kol_InformationAncestorMean.cpp", "max_stars_repo_name": "kellerberrin/KGL_Gene", "max_stars_repo_head_hexsha": "f8e6c14b8b2009d82d692b28354561b5f0513c5e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-04-09T16:24:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-09T16:24:06.000Z", "max_issues_repo_path": 
"kol_ontology/kol_InformationAncestorMean.cpp", "max_issues_repo_name": "kellerberrin/KGL_Gene", "max_issues_repo_head_hexsha": "f8e6c14b8b2009d82d692b28354561b5f0513c5e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kol_ontology/kol_InformationAncestorMean.cpp", "max_forks_repo_name": "kellerberrin/KGL_Gene", "max_forks_repo_head_hexsha": "f8e6c14b8b2009d82d692b28354561b5f0513c5e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5, "max_line_length": 115, "alphanum_fraction": 0.7562056738, "num_tokens": 294, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES\n\n", "lm_q1_score": 0.8418256551882382, "lm_q2_score": 0.5926665999540698, "lm_q1q2_score": 0.49892194881452023}} {"text": "/*******************************************************************************\n * Copyright (c) 2018-, UT-Battelle, LLC.\n * All rights reserved. This program and the accompanying materials\n * are made available under the terms of the MIT License \n * which accompanies this distribution. \n *\n * Contributors:\n * Alexander J. 
McCaskey - initial API and implementation\n * Thien Nguyen - implementation\n *******************************************************************************/\n#include \"qsim_utils.hpp\"\n#include \n#include \n#include \n#include \nnamespace qcor {\nnamespace QuaSiMo {\nstd::shared_ptr\ngetEvaluator(Operator *observable, const HeterogeneousMap ¶ms) {\n // If an evaluator was provided explicitly:\n if (params.pointerLikeExists(\"evaluator\")) {\n return xacc::as_shared_ptr(\n params.getPointerLike(\"evaluator\"));\n }\n\n // Cost Evaluator was provided by name:\n if (params.stringExists(\"evaluator\")) {\n return getObjEvaluator(observable, params.getString(\"evaluator\"));\n }\n\n // No specific evaluator/evaluation method was requested,\n // use the default one (partial tomography based).\n return getObjEvaluator(observable);\n}\n\nPronyResult pronyFit(const std::vector> &in_signal) {\n assert(!in_signal.empty());\n\n // Returns the Hankel matrix constructed from the first column c, and\n // (optionally) the last row r.\n auto hankelMat =\n [](const std::vector> &c,\n const std::vector> &r) -> Eigen::MatrixXcd {\n Eigen::MatrixXcd result = Eigen::MatrixXcd::Zero(c.size(), r.size());\n const auto m = c.size();\n for (int i = 0; i < result.rows(); ++i) {\n for (int j = 0; j < result.cols(); ++j) {\n // H(i,j) = c(i+j), i+j < m;\n if (i + j < m) {\n result(i, j) = c[i + j];\n } else {\n // H(i,j) = r(i+j+1-m), otherwise\n result(i, j) = r[i + j + 1 - m];\n }\n }\n }\n\n return result;\n };\n\n // Pythonic vector slice:\n auto vectorSlice = [](const std::vector> &in_vec,\n int in_start, int in_end) {\n std::vector> result;\n auto startIter = in_vec.begin() + in_start;\n auto endIter =\n in_end > 0 ? 
in_vec.begin() + in_end : (in_vec.end() + in_end);\n result.assign(startIter, endIter);\n return result;\n };\n\n const size_t num_freqs = in_signal.size() / 2;\n auto hankel0 = hankelMat(vectorSlice(in_signal, 0, num_freqs),\n vectorSlice(in_signal, num_freqs - 1, -1));\n auto hankel1 = hankelMat(vectorSlice(in_signal, 1, num_freqs + 1),\n vectorSlice(in_signal, num_freqs, 0));\n // std::cout << \"Hankel Matrix 0: \\n\" << hankel0 << \"\\n\";\n // std::cout << \"Hankel Matrix 1: \\n\" << hankel1 << \"\\n\";\n\n hankel0.transposeInPlace();\n hankel1.transposeInPlace();\n Eigen::MatrixXcd shift_matrix =\n Eigen::MatrixXcd::Zero(hankel0.cols(), hankel1.cols());\n\n for (size_t i = 0; i < hankel1.cols(); ++i) {\n Eigen::VectorXcd shift_matrix_col =\n hankel0.fullPivHouseholderQr().solve(hankel1.col(i));\n shift_matrix.col(i) = shift_matrix_col;\n }\n // std::cout << \"Shift matrix: \\n\" << shift_matrix << \"\\n\";\n\n shift_matrix.transposeInPlace();\n Eigen::ComplexEigenSolver s(shift_matrix);\n auto phases = s.eigenvalues();\n Eigen::MatrixXcd generation_matrix =\n Eigen::MatrixXcd::Zero(in_signal.size(), phases.size());\n for (int i = 0; i < generation_matrix.rows(); ++i) {\n for (int j = 0; j < generation_matrix.cols(); ++j) {\n generation_matrix(i, j) = std::pow(phases[j], i);\n }\n }\n\n auto signalData = in_signal;\n Eigen::VectorXcd signal =\n Eigen::Map(signalData.data(), signalData.size());\n Eigen::VectorXcd amplitudes =\n generation_matrix.fullPivHouseholderQr().solve(signal);\n assert(phases.size() == amplitudes.size());\n // std::cout << \"Amplitude:\\n\" << amplitudes << \"\\n\";\n // std::cout << \"Phases:\\n\" << phases << \"\\n\";\n PronyResult finalResult;\n for (size_t i = 0; i < phases.size(); ++i) {\n finalResult.emplace_back(std::make_pair(amplitudes(i), phases(i)));\n }\n\n // Sort by amplitude:\n std::sort(finalResult.begin(), finalResult.end(),\n [](const auto &a, const auto &b) {\n return std::abs(a.first) < std::abs(b.first);\n });\n\n 
return finalResult;\n}\n} // namespace QuaSiMo\n} // namespace qcor", "meta": {"hexsha": "b12f110f11dea3cc4c076fa79c01179b893b8405", "size": 4561, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "lib/quasimo/impls/utils/qsim_utils.cpp", "max_stars_repo_name": "vetter/qcor", "max_stars_repo_head_hexsha": "6f86835737277a26071593bb10dd8627c29d74a3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 59.0, "max_stars_repo_stars_event_min_datetime": "2019-08-22T18:40:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T04:12:42.000Z", "max_issues_repo_path": "lib/quasimo/impls/utils/qsim_utils.cpp", "max_issues_repo_name": "vetter/qcor", "max_issues_repo_head_hexsha": "6f86835737277a26071593bb10dd8627c29d74a3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 137.0, "max_issues_repo_issues_event_min_datetime": "2019-09-13T15:50:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-06T14:19:46.000Z", "max_forks_repo_path": "lib/quasimo/impls/utils/qsim_utils.cpp", "max_forks_repo_name": "vetter/qcor", "max_forks_repo_head_hexsha": "6f86835737277a26071593bb10dd8627c29d74a3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 26.0, "max_forks_repo_forks_event_min_datetime": "2019-07-08T17:30:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-03T16:24:12.000Z", "avg_line_length": 36.488, "max_line_length": 81, "alphanum_fraction": 0.6103924578, "num_tokens": 1229, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8418256472515683, "lm_q2_score": 0.5926665999540698, "lm_q1q2_score": 0.4989219441107211}} {"text": "#include \"TriangleMuscleConstraint.h\"\n#include \n#include \n#include \n#include \n\nusing namespace FEM;\nTriangleMuscleConstraint::\nTriangleMuscleConstraint(double stiffness,const Eigen::Vector2d& fiber_direction,int i0,int i1,int i2,double area,const Eigen::Matrix2d& invDm)\n\t:Constraint(stiffness),mFiberDirection(fiber_direction),\n\tmi0(i0),mi1(i1),mi2(i2),mArea(area),mInvDm(invDm),mActivationLevel(0.0)\n{\n\n}\nint\nTriangleMuscleConstraint::\nGetDof()\n{\n\treturn 1;\n}\n\nvoid\nTriangleMuscleConstraint::\nEvaluateJMatrix(int index, std::vector>& J_triplets)\n{\n\tEigen::MatrixXd Ai(3,9);\n\n\tEigen::Vector2d v = mInvDm*mFiberDirection;\n\n\tAi<<\n\t\t-v[0]-v[1],0,0,v[0],0,0,v[1],0,0,\n\t\t0,-v[0]-v[1],0,0,v[0],0,0,v[1],0,\n\t\t0,0,-v[0]-v[1],0,0,v[0],0,0,v[1];\n\n\tEigen::MatrixXd MuAiT = mStiffness*mArea*Ai.transpose();\n\n\tint idx[3] = {mi0,mi1,mi2};\n\n\tfor(int i=0;i<3;i++)\n\t{\n\t\tJ_triplets.push_back(Eigen::Triplet(3*idx[i]+0, 3*(index)+0, MuAiT(3*i+0,0)));\n\t\tJ_triplets.push_back(Eigen::Triplet(3*idx[i]+0, 3*(index)+1, MuAiT(3*i+0,1)));\n\t\tJ_triplets.push_back(Eigen::Triplet(3*idx[i]+0, 3*(index)+2, MuAiT(3*i+0,2)));\n\t\tJ_triplets.push_back(Eigen::Triplet(3*idx[i]+1, 3*(index)+0, MuAiT(3*i+1,0)));\n\t\tJ_triplets.push_back(Eigen::Triplet(3*idx[i]+1, 3*(index)+1, MuAiT(3*i+1,1)));\n\t\tJ_triplets.push_back(Eigen::Triplet(3*idx[i]+1, 3*(index)+2, MuAiT(3*i+1,2)));\n\t\tJ_triplets.push_back(Eigen::Triplet(3*idx[i]+2, 3*(index)+0, MuAiT(3*i+2,0)));\n\t\tJ_triplets.push_back(Eigen::Triplet(3*idx[i]+2, 3*(index)+1, MuAiT(3*i+2,1)));\n\t\tJ_triplets.push_back(Eigen::Triplet(3*idx[i]+2, 3*(index)+2, MuAiT(3*i+2,2)));\n\t}\n}\nvoid\nTriangleMuscleConstraint::\nEvaluateLMatrix(std::vector>& L_triplets)\n{\n\tEigen::MatrixXd Ai(3,9);\n\n\tEigen::Vector2d v = 
mInvDm*mFiberDirection;\n\tAi<<\n\t\t-v[0]-v[1],0,0,v[0],0,0,v[1],0,0,\n\t\t0,-v[0]-v[1],0,0,v[0],0,0,v[1],0,\n\t\t0,0,-v[0]-v[1],0,0,v[0],0,0,v[1];\n\n\tEigen::MatrixXd MuAiTAi = mStiffness*mArea*((Ai.transpose())*Ai);\n\n\tint idx[3] = {mi0,mi1,mi2};\n\tfor(int i =0;i<3;i++)\n\t{\n\t\tfor(int j=0;j<3;j++)\n\t\t{\n\t\t\tL_triplets.push_back(Eigen::Triplet(3*idx[i]+0, 3*idx[j]+0, MuAiTAi(3*i+0, 3*j+0)));\n\t\t\tL_triplets.push_back(Eigen::Triplet(3*idx[i]+0, 3*idx[j]+1, MuAiTAi(3*i+0, 3*j+1)));\n\t\t\tL_triplets.push_back(Eigen::Triplet(3*idx[i]+0, 3*idx[j]+2, MuAiTAi(3*i+0, 3*j+2)));\n\t\t\tL_triplets.push_back(Eigen::Triplet(3*idx[i]+1, 3*idx[j]+0, MuAiTAi(3*i+1, 3*j+0)));\n\t\t\tL_triplets.push_back(Eigen::Triplet(3*idx[i]+1, 3*idx[j]+1, MuAiTAi(3*i+1, 3*j+1)));\n\t\t\tL_triplets.push_back(Eigen::Triplet(3*idx[i]+1, 3*idx[j]+2, MuAiTAi(3*i+1, 3*j+2)));\n\t\t\tL_triplets.push_back(Eigen::Triplet(3*idx[i]+2, 3*idx[j]+0, MuAiTAi(3*i+2, 3*j+0)));\n\t\t\tL_triplets.push_back(Eigen::Triplet(3*idx[i]+2, 3*idx[j]+1, MuAiTAi(3*i+2, 3*j+1)));\n\t\t\tL_triplets.push_back(Eigen::Triplet(3*idx[i]+2, 3*idx[j]+2, MuAiTAi(3*i+2, 3*j+2)));\n\t\t}\n\t}\n}\nvoid\nTriangleMuscleConstraint::\nEvaluateDVector(const Eigen::VectorXd& x)\n{\n\tEigen::Vector3d x0(x.segment<3>(mi0*3));\n\n\tEigen::Matrix32d Ds, P;\n\tDs.col(0) = x.segment<3>(mi1*3) - x0;\n\tDs.col(1) = x.segment<3>(mi2*3) - x0;\n\n\tP.col(0) = Ds.col(0).normalized();\n\tP.col(1) = (Ds.col(1)-Ds.col(1).dot(P.col(0))*P.col(0)).normalized();\n\n\tEigen::Matrix2d F = P.transpose()*Ds*mInvDm;\n\n\tmd = (1.0-mActivationLevel)*P*F*mFiberDirection;\t\n}\nvoid\nTriangleMuscleConstraint::\nGetDVector(int& index,Eigen::VectorXd& d)\n{\n\td.segment<3>(3*(index)) = md;\n\tindex++;\n}", "meta": {"hexsha": "7fd2ebd44b5226fd0812838c916d6fb4633def34", "size": 3590, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "sim/fem/Constraint/TriangleMuscleConstraint.cpp", "max_stars_repo_name": "liusida/SoftCon", "max_stars_repo_head_hexsha": 
"39adcb1e2364dd7583b01966af7038d77977e083", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 140.0, "max_stars_repo_stars_event_min_datetime": "2019-09-05T03:24:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T13:44:48.000Z", "max_issues_repo_path": "sim/fem/Constraint/TriangleMuscleConstraint.cpp", "max_issues_repo_name": "liusida/SoftCon", "max_issues_repo_head_hexsha": "39adcb1e2364dd7583b01966af7038d77977e083", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-03-15T14:23:02.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-15T14:23:02.000Z", "max_forks_repo_path": "sim/fem/Constraint/TriangleMuscleConstraint.cpp", "max_forks_repo_name": "liusida/SoftCon", "max_forks_repo_head_hexsha": "39adcb1e2364dd7583b01966af7038d77977e083", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 23.0, "max_forks_repo_forks_event_min_datetime": "2019-09-08T02:51:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T12:49:05.000Z", "avg_line_length": 33.8679245283, "max_line_length": 143, "alphanum_fraction": 0.6601671309, "num_tokens": 1493, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8418256393148981, "lm_q2_score": 0.5926665999540698, "lm_q1q2_score": 0.49892193940692176}} {"text": "//==================================================================================================\n/*!\n @file\n\n @copyright 2016 NumScale SAS\n\n Distributed under the Boost Software License, Version 1.0.\n (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)\n*/\n//==================================================================================================\n#ifndef BOOST_SIMD_FUNCTION_TANH_HPP_INCLUDED\n#define BOOST_SIMD_FUNCTION_TANH_HPP_INCLUDED\n\n#if defined(DOXYGEN_ONLY)\nnamespace boost { namespace simd\n{\n\n /*!\n\n @ingroup group-hyperbolic\n This function object returns the hyperbolic tangent: \\f$\\sinh(x)/\\cosh(x)\\f$.\n\n @see sinh, cosh, sech, csch, sinhcosh\n\n\n @par Header \n\n @par Example:\n\n @snippet tanh.cpp tanh\n\n @par Possible output:\n\n @snippet tanh.txt tanh\n **/\n IEEEValue tanh(IEEEValue const& x);\n} }\n#endif\n\n#include \n#include \n\n#endif\n", "meta": {"hexsha": "2911bbe3f7946d97397ccb450b4372f86170724e", "size": 1022, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "third_party/boost/simd/function/tanh.hpp", "max_stars_repo_name": "SylvainCorlay/pythran", "max_stars_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2018-02-25T22:23:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T15:13:12.000Z", "max_issues_repo_path": "third_party/boost/simd/function/tanh.hpp", "max_issues_repo_name": "SylvainCorlay/pythran", "max_issues_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/boost/simd/function/tanh.hpp", 
"max_forks_repo_name": "SylvainCorlay/pythran", "max_forks_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-12-12T12:36:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-10T14:27:07.000Z", "avg_line_length": 23.2272727273, "max_line_length": 100, "alphanum_fraction": 0.5714285714, "num_tokens": 236, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8418256313782277, "lm_q2_score": 0.5926665999540698, "lm_q1q2_score": 0.4989219347031223}} {"text": "#include \n#include \n\n#include \n\n#define PI 3.1415926535897932384\n\ntypedef int int_t;\n\n//#include \"Tile3D.h\"\nKOKKOS_FUNCTION double left_flux(double left, double self, double cfl)\n{\n return -cfl * left + cfl * self;\n}\n\nKOKKOS_FUNCTION double right_flux(double self, double right, double cfl)\n{\n return -cfl * right + cfl * self;\n}\n\nKOKKOS_FUNCTION double stencil6p(double self, double xm, double xp, double ym,\n double yp, double zm, double zp, double cfl)\n{\n return (1.0 - 6.0 * cfl) * self + cfl * (xm + xp + ym + yp + zm + zp);\n}\n\nint main(int argc, char* argv[])\n{\n double cfl = 0.1; // must be less than 1/6\n\n namespace bpo = boost::program_options;\n bpo::options_description desc(\"ABFT3D\");\n\n desc.add_options()(\n \"xsize\", bpo::value()->default_value(98), \"X Dimension\");\n desc.add_options()(\n \"ysize\", bpo::value()->default_value(98), \"Y Dimension\");\n desc.add_options()(\n \"zsize\", bpo::value()->default_value(98), \"Z Dimension\");\n\n bpo::variables_map vm;\n\n // Setup commandline arguments\n bpo::store(bpo::parse_command_line(argc, argv, desc), vm);\n bpo::notify(vm);\n\n int xsize = vm[\"xsize\"].as();\n int ysize = vm[\"ysize\"].as();\n int zsize = vm[\"zsize\"].as();\n\n bool do_check_sum = true;\n\n Kokkos::initialize(argc, argv);\n {\n using range_policy = Kokkos::RangePolicy<>;\n\n Kokkos::View 
stencil_0(\n \"data1\", xsize + 2, ysize + 2, zsize + 2);\n Kokkos::View stencil_1(\n \"data2\", xsize + 2, ysize + 2, zsize + 2);\n\n // Soft copy\n auto stencil_old = stencil_0;\n auto stencil_new = stencil_1;\n auto stencil_tmp = stencil_new;\n Kokkos::View> checksum(\n \"checksum\", 1);\n\n Kokkos::View hchecksum(\n \"h_checksum\", 1);\n hchecksum[0] = 0.;\n\n Kokkos::deep_copy(checksum, hchecksum);\n\n // Initialize stencil\n Kokkos::parallel_for(\n \"init\", range_policy(0, ysize + 2), KOKKOS_LAMBDA(int j) {\n for (int k = 0; k < zsize + 2; ++k)\n {\n for (int i = 0; i < xsize + 2; ++i)\n {\n stencil_old(i, j, k) = std::sin(1.0 * PI * (double) i /\n (double) (xsize + 1)); //*\n stencil_new(i, j, k) = stencil_old(i, j, k);\n }\n }\n });\n Kokkos::fence();\n\n Kokkos::Timer timer;\n\n //\n // How to catch the error ... order matters. where does the error come from?\n // Physical location could matter (plus interaction with OS)\n //\n\n // Original checksum\n if (do_check_sum)\n {\n Kokkos::parallel_for(\n \"init_checksum\", range_policy(1, xsize + 1),\n KOKKOS_LAMBDA(int i) {\n for (int j = 1; j <= ysize; ++j)\n {\n for (int k = 1; k <= zsize; ++k)\n {\n checksum[0] += stencil_old(i, j, k);\n }\n }\n });\n Kokkos::fence();\n }\n\n Kokkos::deep_copy(hchecksum, checksum);\n\n std::cout << \"Initial Checksum \" << std::scientific << hchecksum[0]\n << std::endl;\n\n double chk_ = 0;\n for (int its = 0; its < 10; ++its)\n {\n if (do_check_sum)\n {\n // Deduct the checksum from the boundary\n Kokkos::parallel_for(\n \"first_loop\", range_policy(1, zsize + 1),\n KOKKOS_LAMBDA(int k) {\n for (int j = 1; j <= ysize; ++j)\n {\n checksum[0] -= left_flux(stencil_old(0, j, k),\n stencil_old(1, j, k), cfl) +\n right_flux(stencil_old(xsize, j, k),\n stencil_old(xsize + 1, j, k), cfl);\n }\n });\n Kokkos::fence();\n\n Kokkos::parallel_for(\n \"second_loop\", range_policy(1, zsize + 1),\n KOKKOS_LAMBDA(int k) {\n for (int i = 1; i <= xsize; ++i)\n {\n checksum[0] -= 
left_flux(stencil_old(i, 0, k),\n stencil_old(i, 1, k), cfl) +\n right_flux(stencil_old(i, ysize, k),\n stencil_old(i, ysize + 1, k), cfl);\n }\n });\n Kokkos::fence();\n\n Kokkos::parallel_for(\n \"third_loop\", range_policy(1, ysize + 1),\n KOKKOS_LAMBDA(int j) {\n for (int i = 1; i <= xsize; ++i)\n {\n checksum[0] -= left_flux(stencil_old(i, j, 0),\n stencil_old(i, j, 1), cfl) +\n right_flux(stencil_old(i, j, zsize),\n stencil_old(i, j, zsize + 1), cfl);\n }\n });\n Kokkos::fence();\n }\n\n // Apply stencil\n // Replace the loops by parallel for\n //\n Kokkos::parallel_reduce(\n \"stencil_op\", range_policy(1, xsize + 1),\n KOKKOS_LAMBDA(int i, double& chk_) {\n for (int j = 1; j <= ysize; ++j)\n {\n for (int k = 1; k <= zsize; ++k)\n {\n //std::cout << \"Old \" << i << \" \" << j << \" \" << k << \": \" << stencil_old(i,j,k) << std::endl;\n stencil_new(i, j, k) = stencil6p(\n stencil_old(i, j, k), stencil_old(i - 1, j, k),\n stencil_old(i + 1, j, k),\n stencil_old(i, j - 1, k),\n stencil_old(i, j + 1, k),\n stencil_old(i, j, k - 1),\n stencil_old(i, j, k + 1), cfl);\n\n chk_ += stencil_new(i, j, k);\n }\n }\n },\n chk_);\n Kokkos::fence();\n\n // chk_ = 0;\n\n if (do_check_sum)\n {\n Kokkos::deep_copy(hchecksum, checksum);\n\n // Testing the checksum\n auto error = std::abs((chk_ - hchecksum[0]));\n\n std::cout << \"New Checksum \" << std::scientific << chk_\n << \" and analytical checksum \" << hchecksum[0]\n << \" with Error: \" << error << std::endl;\n\n if (error > 1e-2)\n {\n std::cout << \"Failed \" << error << std::endl;\n }\n\n // Alternate stencil assignment\n if (its % 2 == 0)\n {\n stencil_old = stencil_1;\n stencil_new = stencil_0;\n }\n else\n {\n stencil_old = stencil_0;\n stencil_new = stencil_1;\n }\n\n chk_ = 0.;\n }\n }\n\n double time_taken = timer.seconds();\n\n std::cout << \"Executed in: \" << time_taken << std::endl;\n }\n\n Kokkos::finalize();\n return 0;\n}", "meta": {"hexsha": "0fd7564e472a5692ca45fa1fb70a865c71d1c345", "size": 7827, 
"ext": "cpp", "lang": "C++", "max_stars_repo_path": "applications/non-resilient/ABFT3D.cpp", "max_stars_repo_name": "NK-Nikunj/Kokkos-Resilient-Spaces", "max_stars_repo_head_hexsha": "7203d40747d1961286cb4f9ca872354f395ac601", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "applications/non-resilient/ABFT3D.cpp", "max_issues_repo_name": "NK-Nikunj/Kokkos-Resilient-Spaces", "max_issues_repo_head_hexsha": "7203d40747d1961286cb4f9ca872354f395ac601", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "applications/non-resilient/ABFT3D.cpp", "max_forks_repo_name": "NK-Nikunj/Kokkos-Resilient-Spaces", "max_forks_repo_head_hexsha": "7203d40747d1961286cb4f9ca872354f395ac601", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7370689655, "max_line_length": 123, "alphanum_fraction": 0.41459052, "num_tokens": 1870, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8311430394931456, "lm_q2_score": 0.600188359260205, "lm_q1q2_score": 0.4988423771839308}} {"text": "/**\n * \\file libsanm/tensor_polymat.cpp\n * This file is part of SANM, a symbolic asymptotic numerical solver.\n */\n\n#include \"libsanm/stl.h\"\n#include \"libsanm/tensor_impl_helper.h\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace sanm;\n\nnamespace {\n\nusing cfp_t = std::complex;\n\n/*!\n * \\brief compute the DFT of given coeffs using the FFT algorithm\n *\n * Evaluate the polynomial defined by the \\p coeffs at the points \\f$\\omega_n^0,\n * \\ldots, \\omega_n^{n-1}\\f$ where \\f$n\\f$ is \\p nr_term and must be a power of\n * two.\n *\n * \\return the real part and the imaginary part\n */\nstd::pair fft(const TensorArray& coeffs,\n size_t nr_term) {\n sanm_assert(!(nr_term & (nr_term - 1)));\n if (coeffs.size() == 1) {\n return {TensorArray(nr_term, coeffs[0]),\n TensorArray(nr_term, coeffs[0].fill_with(0))};\n }\n if (nr_term == 1) {\n TensorND sum = coeffs[0];\n for (size_t i = 1; i < coeffs.size(); ++i) {\n sum += coeffs[i];\n }\n return {{sum}, {sum.fill_with(0)}};\n }\n\n TensorArray coeffs_even, coeffs_odd;\n coeffs_even.reserve((coeffs.size() + 1) / 2);\n coeffs_odd.reserve(coeffs.size() / 2);\n for (size_t i = 0; i < coeffs.size(); i += 2) {\n coeffs_even.emplace_back(coeffs[i]);\n if (i + 1 < coeffs.size()) {\n coeffs_odd.emplace_back(coeffs[i + 1]);\n }\n }\n\n auto result_even = fft(coeffs_even, nr_term / 2),\n result_odd = fft(coeffs_odd, nr_term / 2);\n\n TensorArray ret_real(nr_term), ret_imag(nr_term);\n for (size_t si = 0; si < nr_term / 2; ++si) {\n const TensorND& a_real = result_even.first[si];\n const TensorND& a_imag = result_even.second[si];\n const TensorND& b_real = result_odd.first[si];\n const TensorND& b_imag = result_odd.second[si];\n\n for (size_t i : {si, si + nr_term / 2}) {\n TensorND& y_real = ret_real[i];\n TensorND& y_imag = ret_imag[i];\n\n fp_t angle = fp_t(i) * 
(M_PI * 2) / fp_t(nr_term),\n x_real = std::cos(angle), x_imag = std::sin(angle);\n\n // y = a + x * b\n y_real.set_shape(coeffs[0].shape());\n y_imag.set_shape(coeffs[0].shape());\n as_vector_w(y_real) = as_vector_r(a_real) +\n as_vector_r(b_real) * x_real -\n as_vector_r(b_imag) * x_imag;\n as_vector_w(y_imag) = as_vector_r(a_imag) +\n as_vector_r(b_imag) * x_real +\n as_vector_r(b_real) * x_imag;\n }\n }\n return {std::move(ret_real), std::move(ret_imag)};\n}\n\nsize_t next_pow2(size_t x) {\n size_t y = 1;\n while (y < x) {\n y <<= 1;\n }\n return y;\n}\n\nTensorND compute_polymat_det_coeff_with_fft(const TensorArray& coeffs,\n size_t nr_term_pow2,\n size_t target_order) {\n std::pair polymat_dft;\n {\n SANM_SCOPED_PROFILER(\"polymat_det_fft\");\n polymat_dft = fft(coeffs, nr_term_pow2);\n }\n\n const size_t batch = coeffs[0].shape(0);\n const Eigen::Index mdim = coeffs[0].shape(1);\n ScopedAllowMalloc allow_malloc;\n Eigen::Matrix eigmat(mdim, mdim);\n\n TensorND ret{TensorShape{batch, 1}};\n auto ret_ptr = ret.woptr();\n for (size_t ib = 0; ib < batch; ++ib) {\n // do the inverse dft to solve the target coefficient\n cfp_t accum = 0;\n for (size_t i = 0; i < nr_term_pow2; ++i) {\n EigenMatDyn mreal{const_cast(polymat_dft.first[i].ptr()) +\n ib * mdim * mdim,\n mdim, mdim},\n mimag{const_cast(polymat_dft.second[i].ptr()) +\n ib * mdim * mdim,\n mdim, mdim};\n eigmat.real() = mreal;\n eigmat.imag() = mimag;\n cfp_t dfti = eigmat.determinant();\n fp_t angle =\n -(M_PI * 2) * fp_t(i * target_order) / fp_t(nr_term_pow2);\n accum += dfti * cfp_t{std::cos(angle), std::sin(angle)};\n }\n accum /= fp_t(nr_term_pow2);\n sanm_assert(std::fabs(accum.imag()) <\n 1e-4 * std::max(1, std::fabs(accum.real())),\n \"IDFT not real: real=%g imag=%g\", accum.real(),\n accum.imag());\n ret_ptr[ib] = accum.real();\n }\n\n return ret;\n}\n\nTensorArray transpose_coeffs(const TensorArray& coeffs) {\n TensorArray ret;\n ret.resize(coeffs.size());\n for (size_t i = 0; i < coeffs.size(); 
++i) {\n TensorND& dst = ret[i];\n const TensorND& src = coeffs[i];\n sanm_assert(src.rank() == 3);\n size_t n = src.shape(0), m0 = src.shape(1), m1 = src.shape(2);\n dst.set_shape({m0, m1, n});\n EigenMatDyn mdst{dst.woptr(), static_cast(n),\n static_cast(m0 * m1)},\n msrc{const_cast(src.ptr()),\n static_cast(m0 * m1),\n static_cast(n)};\n mdst = msrc.transpose();\n }\n return ret;\n}\n\nusing EigenVecArr = std::span;\n\nvoid conv(EigenVecArr dst, EigenVecArr x, EigenVecArr y) {\n for (EigenVec& i : dst) {\n i.setZero();\n }\n for (size_t i = 0; i < x.size(); ++i) {\n for (size_t j = 0; j < y.size() && i + j < dst.size(); ++j) {\n dst[i + j].array() += x[i].array() * y[j].array();\n }\n }\n}\n\nvoid conv_k(size_t k, EigenVec& dst, EigenVecArr x, EigenVecArr y) {\n bool first = true;\n for (size_t i = std::max(0, int(k) + 1 - int(y.size()));\n i < x.size() && i <= k; ++i) {\n if (first) {\n dst = x[i].array() * y[k - i].array();\n first = false;\n } else {\n dst.array() += x[i].array() * y[k - i].array();\n }\n }\n if (first) {\n dst.setZero();\n }\n}\n\nclass EigenVecArrStorage : public ObjArray {\npublic:\n explicit EigenVecArrStorage(size_t size)\n : ObjArray(size, nullptr, 0, 1) {}\n};\n\n/*!\n * \\brief Compute a single term in the expansion of determinant\n *\n * The result is the product of mat[i][row_indices[i]]\n * Result is negated if row_indices[0]<0 or row_indices[1]<0\n *\n * \\param coeffs_trans transposed coefficients in (m, m, batch) shape\n * \\param k target term order\n */\nclass DetSingleTermCompute {\n TensorArray m_coeffs_trans;\n const size_t m_batch;\n const size_t m_k;\n std::unique_ptr m_buf_storage;\n TensorND m_ret;\n EigenVecArrStorage m_buf0, m_buf1, m_opr0, m_opr1;\n\npublic:\n DetSingleTermCompute(const TensorArray& coeffs, size_t k)\n : m_coeffs_trans{transpose_coeffs(coeffs)},\n m_batch{m_coeffs_trans[0].shape(2)},\n m_k{k},\n m_buf_storage{new fp_t[2 * (k + 1) * m_batch]},\n m_ret{TensorShape{m_batch, 1}},\n m_buf0{k + 1},\n 
m_buf1{k + 1},\n m_opr0{coeffs.size()},\n m_opr1{coeffs.size()} {\n auto ptr = m_buf_storage.get();\n for (size_t i = 0; i <= k; ++i) {\n reset(m_buf0[i], ptr + (i * 2) * m_batch, m_batch);\n reset(m_buf1[i], ptr + (i * 2 + 1) * m_batch, m_batch);\n }\n }\n\n TensorND operator()(std::span row_indices) {\n auto extract = [&row_indices, this, msize = m_coeffs_trans[0].shape(1)](\n EigenVecArr dst, int r) {\n for (size_t i = 0; i < m_coeffs_trans.size(); ++i) {\n size_t c = std::abs(row_indices[r]);\n reset(dst[i],\n m_coeffs_trans[i].ptr() + (r * msize + c) * m_batch,\n m_batch);\n }\n };\n\n auto conv_ret = [this, &row_indices](EigenVecArr x, EigenVecArr y) {\n auto ret_vec = as_vector_w(m_ret);\n conv_k(m_k, ret_vec, x, y);\n if (row_indices[0] < 0 || row_indices[1] < 0) {\n m_ret.inplace_neg();\n }\n return m_ret;\n };\n\n extract(m_opr0, 0);\n extract(m_opr1, 1);\n if (row_indices.size() == 2) {\n return conv_ret(m_opr0, m_opr1);\n }\n\n EigenVecArrStorage *prod = &m_buf0, *prod_next = &m_buf1;\n conv(*prod, m_opr0, m_opr1);\n\n for (size_t i = 2; i + 1 < row_indices.size(); ++i) {\n extract(m_opr0, i);\n conv(*prod_next, *prod, m_opr0);\n std::swap(prod, prod_next);\n }\n extract(m_opr0, row_indices.size() - 1);\n return conv_ret(*prod, m_opr0);\n }\n};\n\n/*!\n * \\brief get the terms in the expansion of determinant\n * \\return vector of size m! 
* m; the sign of items i*m are the sign of the\n * terms\n */\nconst std::vector& get_det_terms(size_t m) {\n static std::mutex mutex;\n static std::vector> results{std::vector{0}};\n static auto compute = [&](int size) {\n sanm_assert(results.size() == static_cast(size - 1));\n std::vector& cur = results.emplace_back();\n const auto& prev = results[size - 2];\n for (int i = 0; i < size; ++i) {\n for (size_t j = 0; j < prev.size();) {\n size_t r0 = cur.size();\n bool neg = i % 2;\n cur.push_back(i);\n for (int jdt = 0; jdt < size - 1; ++jdt) {\n int p = prev[j + jdt];\n if (p < 0) {\n neg = !neg;\n p = -p;\n }\n cur.push_back(p + (p >= i));\n }\n if (neg) {\n if (cur[r0]) {\n cur[r0] = -cur[r0];\n } else {\n cur[r0 + 1] = -cur[r0 + 1];\n }\n }\n j += size - 1;\n }\n }\n#if 0\n printf(\"det(%d):\\n\", size);\n for (size_t i = 0; i < cur.size(); ++i) {\n printf(\"%d\", cur[i]);\n if ((i + 1) % size == 0) {\n printf(\"\\n\");\n } else {\n printf(\" \");\n }\n }\n#endif\n };\n\n sanm_assert(m >= 1);\n std::lock_guard mutex_lg{mutex};\n if (m - 1 < results.size()) {\n return results[m - 1];\n }\n for (size_t i = results.size() + 1; i <= m; ++i) {\n compute(i);\n }\n return results[m - 1];\n}\n\nTensorND compute_polymat_det_coeff_by_expanding(const TensorArray& coeffs,\n size_t target_order) {\n DetSingleTermCompute tc{coeffs, target_order};\n size_t m = coeffs[0].shape(1);\n const auto& terms = get_det_terms(m);\n\n TensorND ret;\n for (size_t i = 0; i < terms.size(); i += m) {\n TensorND cur = tc({terms.data() + i, m});\n if (!i) {\n ret = cur;\n } else {\n ret += cur;\n }\n }\n return ret;\n}\n} // anonymous namespace\n\nTensorND sanm::compute_polymat_det_coeff(const TensorArray& coeffs,\n size_t order) {\n SANM_SCOPED_PROFILER(\"polymat_det\");\n sanm_assert(!coeffs.empty() && coeffs[0].rank() == 3 &&\n coeffs[0].shape(1) == coeffs[0].shape(2));\n for (size_t i = 1; i < coeffs.size(); ++i) {\n sanm_assert(coeffs[i].shape() == coeffs[0].shape());\n }\n\n const size_t 
batch = coeffs[0].shape(0), mdim = coeffs[0].shape(1),\n nr_term = (coeffs.size() - 1) * mdim + 1;\n sanm_assert(mdim >= 2);\n\n if (order >= nr_term) {\n return TensorND{TensorShape{batch, 1}}.fill_with_inplace(0);\n }\n if (order == 0) {\n return coeffs[0].batched_determinant();\n }\n if (order == 1) {\n TensorND ret{TensorShape{batch, 1}},\n src = coeffs[0].batched_cofactor() * coeffs[1];\n EigenMatDyn smat{const_cast(src.ptr()),\n static_cast(mdim * mdim),\n static_cast(batch)};\n as_vector_w(ret) = smat.colwise().sum().transpose();\n return ret;\n }\n\n if (mdim <= 4) {\n return compute_polymat_det_coeff_by_expanding(coeffs, order);\n }\n\n return compute_polymat_det_coeff_with_fft(coeffs, next_pow2(nr_term),\n order);\n}\n", "meta": {"hexsha": "97fca3ecfdff01823e49b16ff08ef22058acd184", "size": 12639, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libsanm/tensor_polymat.cpp", "max_stars_repo_name": "jia-kai/SANM", "max_stars_repo_head_hexsha": "2673ac476b3d2978a52bf47bc12d3402ea20f211", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25.0, "max_stars_repo_stars_event_min_datetime": "2021-05-19T09:27:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T15:22:05.000Z", "max_issues_repo_path": "libsanm/tensor_polymat.cpp", "max_issues_repo_name": "jia-kai/SANM", "max_issues_repo_head_hexsha": "2673ac476b3d2978a52bf47bc12d3402ea20f211", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-09-03T05:31:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-05T01:37:42.000Z", "max_forks_repo_path": "libsanm/tensor_polymat.cpp", "max_forks_repo_name": "jia-kai/SANM", "max_forks_repo_head_hexsha": "2673ac476b3d2978a52bf47bc12d3402ea20f211", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2605263158, "max_line_length": 80, "alphanum_fraction": 
0.5093757418, "num_tokens": 3455, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8311430353105599, "lm_q2_score": 0.600188359260205, "lm_q1q2_score": 0.49884237467359155}} {"text": "\n/*=========================================================================\n\n Program: Small Body Geophysical Analysis\n Module: SBGATSphericalHarmo.hpp\n\n Class derived from VTK's vtkPolyDataAlgorithm by Benjamin Bercovici \n\n Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen\n All rights reserved.\n See Copyright.txt or http://www.kitware.com/Copyright.htm for details.\n\n This software is distributed WITHOUT ANY WARRANTY; without even\n the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR\n PURPOSE. See the above copyright notice for more information.\n\n=========================================================================*/\n/**\n @class SBGATSphericalHarmo\n @author Benjamin Bercovici\n @author Jay McMahon\n@date October 2018\n @brief Computes/evaluates the outer spherical harmonics expansion of the exterior gravity\n field around a constant density polyhedron\n\n @details Computes/evaluates the outer spherical harmonics expansion of the exterior gravity\nfield around a constant density polyhedron. Normalized or non-normalized coefficients can be computed.\nThe computed coefficients are completely independent of the mass and density of the considered object\nas they are only a geometric construct, thanks to the constant-density assumption. This class will always use results expressed in `meters` as their distance unit (e.g accelerations in m/s^2, potentials in m^2/s^2,...) . Unit consistency is enforced through the use of the SetScaleMeters()\nand SetScaleKiloMeters() method. \n\nAdapted from the works of Yu Takahashi and Siamak Hesar by Benjamin Bercovici, University of Colorado Boulder\nfor more details, see \nWerner, R. a. (1997). \nSpherical harmonic coefficients for the potential of a constant-density polyhedron. 
\nComputers & Geosciences, \n23(10), \n1071\u20131077. \nhttps://doi.org/10.1016/S0098-3004(97)00110-6\n@copyright MIT License, Benjamin Bercovici and Jay McMahon\n*/\n\n#ifndef SBGATSphericalHarmo_h\n#define SBGATSphericalHarmo_h\n\n#include // For export macro\n#include \n#include \n\nclass VTKFILTERSCORE_EXPORT SBGATSphericalHarmo : public vtkPolyDataAlgorithm{\npublic:\n /**\n * Constructs with initial values of zero.\n */\n static SBGATSphericalHarmo *New();\n\n vtkTypeMacro(SBGATSphericalHarmo,vtkPolyDataAlgorithm);\n void PrintSelf(std::ostream& os, vtkIndent indent) override;\n void PrintHeader(std::ostream& os, vtkIndent indent) override;\n void PrintTrailer(std::ostream& os, vtkIndent indent) override;\n\n /**\n Sets degree of spherical harmonics expansion\n @param deg degree of spherical harmonics expansion\n */\n void SetDegree(const unsigned int deg){\n this -> degree = deg;\n this -> degreeSet = true;\n }\n\n /**\n Sets reference radius in spherical harmonics expansion. Must be consistent \n with the units in which the shape coordinates are expressed\n @param ref_radius reference radius in spherical harmonics expansion\n */\n void SetReferenceRadius(const double ref_radius){\n this -> referenceRadius = ref_radius;\n this -> referenceRadiusSet = true;\n }\n\n /**\n Sets polyhedron density \n @param density bulk density of polyhedron (kg/m^3)\n */\n void SetDensity(const double density){\n this -> density = density;\n this -> densitySet = true;\n }\n\n\n /*\n Will return normalized coefficients next (default is normalized)\n */\n void IsNormalized(){\n this -> normalized = true;\n }\n\n /*\n Will return non-normalized coefficients next (default is normalized)\n */\n void IsNonNormalized(){\n this -> normalized = false;\n }\n\n /*\n Return Cnm array of coefficients ordered like in the exemple below where degree = 5\n\n 1 0 0 0 0 0\n C_10 C_11 C_12 C_13 C_14 C_15\n C_20 C_21 C_22 C_23 C_24 C_25 \n C_30 C_31 C_32 C_33 C_34 C_35 \n C_40 C_41 C_42 C_43 C_44 
C_45\n C_50 C_51 C_52 C_53 C_54 C_55\n\n\n @return Cnm array of coefficients\n */\n arma::mat GetCnm() {this -> Update(); return this -> Cnm;}\n\n\n /*\n Return Cnm array of coefficients ordered like in the exemple below where degree = 5\n\n 1 0 0 0 0 0\n C_10 C_11 C_12 C_13 C_14 C_15\n C_20 C_21 C_22 C_23 C_24 C_25 \n C_30 C_31 C_32 C_33 C_34 C_35 \n C_40 C_41 C_42 C_43 C_44 C_45\n C_50 C_51 C_52 C_53 C_54 C_55\n\n\n @param[out] Cnm array of coefficients\n */\n void GetCnm(arma::mat & C_nm) {this -> Update(); C_nm = this -> Cnm;}\n\n\n /*\n \n Return Snm array of coefficients ordered like in the exemple below where degree = 5\n\n 0 0 0 0 0 0\n 0 S_11 S_12 S_13 S_14 S_15\n 0 S_21 S_22 S_23 S_24 S_25 \n 0 S_31 S_32 S_33 S_34 S_35 \n 0 S_41 S_42 S_43 S_44 S_45\n 0 S_51 S_52 S_53 S_54 S_55\n\n\n @return Snm array of coefficients\n */\n arma::mat GetSnm() {this -> Update(); return this -> Snm;}\n\n /*\n Return Snm array of coefficients ordered like in the exemple below where degree = 5\n\n 0 0 0 0 0 0\n 0 S_11 S_12 S_13 S_14 S_15\n 0 S_21 S_22 S_23 S_24 S_25 \n 0 S_31 S_32 S_33 S_34 S_35 \n 0 S_41 S_42 S_43 S_44 S_45\n 0 S_51 S_52 S_53 S_54 S_55\n\n @param[out] Snm array of coefficients\n */\n void GetSnm(arma::mat & S_nm) {this -> Update(); S_nm = this -> Snm;}\n\n /**\n Return the acceleration due to gravity at the specified point\n @param pos position at which the acceleration must be evaluated (meters)\n @return acceleration (m / s ^ 2)\n */\n arma::vec::fixed<3> GetAcceleration(const arma::vec::fixed<3> & pos);\n\n\n /** \n Evaluates the gravity gradient matrix (the partial derivative of the spherical \n harmonics acceleration with respect to the position vector) at the prescribed\n location. 
Note that this gravity gradient matrix is expressed in the body-fixed frame of the considered object.\n @param[in] pos position at which the gravity gradient matrix must be evaluated, expressed in the same frame/same unit L as \n the shape used to build the spherical harmonics expansion.\n @param[out] dAccdPos container holding the gravity gradient matrix (1 / s ^ 2)\n */\n void GetGravityGradientMatrix(const arma::vec::fixed<3> & pos,\n arma::mat::fixed<3,3> & dAccdPos);\n\n\n /** \n Evaluates the partial derivative of the spherical harmonics acceleration\n with respect to the spherical harmonics coefficients. The coefficients and the partials of the acceleration w/r to the coefficients\n are ordered like so:\n \n Cnm :\n(C_00 == 1) 0 0 0 0 0\n C_10 C_11 0 0 0 0 \n C_20 C_21 C_22 0 0 0 \n C_30 C_31 C_32 C_33 0 0 \n C_40 C_41 C_42 C_43 C_44 0\n C_50 C_51 C_52 C_53 C_54 C_55\n \n Snm :\n 0 0 0 0 0 0\n 0 S_11 0 0 0 0\n 0 S_21 S_22 0 0 0 \n 0 S_31 S_32 S_33 0 0 \n 0 S_41 S_42 S_43 S_44 0\n 0 S_51 S_52 S_53 S_54 S_55\n\n so \n\n partial_C = [ dA/C_00,dA/C_10,dA/C_11,dA/C_20,dA/C_21,dA/C_22,...]\n partial_S = [ dA/S_11,dA/C_21,dA/S_22,dA/S_31,dA/S_32,dA/S_33,...]\n\n @param[in] pos position at which the partial derivatives must be evaluated, expressed in the same frame/same unit as \n the shape used to build the spherical harmonics expansion.\n @param[out] partial_C container holding the partial derivative of the acceleration \n with respect to the Cnm spherical harmonic coefficients. If the degree/order is n, then there are (n+1) * (n + 2)/2 non-zero Cnm coefficients\n \n @param[out] partial_S container holding the partial derivative of the acceleration \n with respect to the Snm spherical harmonic coefficients. 
If the degree/order is n, then there are (n+1) * (n + 2)/2 - n non-zero Snm coefficients\n\n */\n void GetPartialHarmonics(const arma::vec::fixed<3> & pos,\n arma::mat & partial_C, \n arma::mat & partial_S);\n /**\n Sets the scale factor to 1, indicative that the polydata has its coordinates expressed in meters\n */\n void SetScaleMeters() { this -> scaleFactor = 1; this -> scaleFactorSet = true;}\n\n /**\n Sets the scale factor to 1000, indicative that the polydata has its coordinates expressed in kilometers\n */\n void SetScaleKiloMeters() { this -> scaleFactor = 1000; this -> scaleFactorSet = true;}\n\n\n /**\n Exports the computed spherical harmonics expansion to \n a JSON file. The saved fields are:\n - facets == number of facets\n - vertices == number of vertices\n - totalMass : {value, unit}\n - density : {value, unit}\n - reference_radius : {value, unit}\n - normalized == true if the coefficients are normalized\n - degree == degree of the spherical expansion\n - Cnm_coefs - vector of coefficients triplets {n,m,Cnm}\n - Snm_coefs - vector of coefficients triplets {n,m,Snm}\n @param path JSON file where the spherical harmonics model will be saved\n\n */\n void SaveToJson(std::string path) const;\n\n /**\n Loads a previously computed spherical harmonics expansion\n from a JSON file. Will set the appropriate fields in the SBGATSphericalHarmo object to\n allow calls to other methods. 
\n The loadable fields are:\n - facets == number of facets (not needed for evaluation)\n - vertices == number of vertices (not needed for evaluation)\n - totalMass : {value, unit}\n - density : {value, unit}\n - reference_radius : {value, unit}\n - normalized == true if the coefficients are normalized\n - degree == degree of the spherical expansion\n - Cnm_coefs - vector of coefficients triplets {n,m,Cnm}\n - Snm_coefs - vector of coefficients triplets {n,m,Snm}\n @param path JSON file storing the spherical harmonics model\n */\n void LoadFromJson(std::string path);\n\n /**\n Sets the Cnm coefficients. There is normally no need to use this method outside of Sbgat's tests\n @param[in] Cnm coefficients\n */\n void SetCnm(arma::mat Cnm){this ->Cnm =Cnm;}\n\n /**\n Sets the Snm coefficients. There is normally no need to use this method outside of Sbgat's tests\n @param[in] Snm coefficients\n */\n void SetSnm(arma::mat Snm){this ->Snm =Snm;}\n\n\n\nprotected:\n SBGATSphericalHarmo();\n ~SBGATSphericalHarmo() override;\n\n int RequestData(vtkInformation* request,\n vtkInformationVector** inputVector,\n vtkInformationVector* outputVector) override;\n\n arma::mat Cnm;\n arma::mat Snm;\n\n double referenceRadius;\n double density;\n double totalMass;\n double scaleFactor = 1;\n\n bool normalized;\n unsigned int degree;\n\n int n_facets;\n int n_vertices;\n\n bool degreeSet;\n bool densitySet;\n bool referenceRadiusSet;\n bool scaleFactorSet;\n bool setFromJSON;\n\nprivate:\n SBGATSphericalHarmo(const SBGATSphericalHarmo&) = delete;\n void operator=(const SBGATSphericalHarmo&) = delete;\n};\n\n#endif\n\n\n", "meta": {"hexsha": "626418cc2d286c5866d864033811dcba7f5cc9dc", "size": 10637, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "SbgatCore/include/SbgatCore/SBGATSphericalHarmo.hpp", "max_stars_repo_name": "bbercovici/SBGAT", "max_stars_repo_head_hexsha": "93e935baff49eb742470d7d593931f0573f0c062", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6.0, 
"max_stars_repo_stars_event_min_datetime": "2017-11-29T02:47:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-26T05:25:44.000Z", "max_issues_repo_path": "SbgatCore/include/SbgatCore/SBGATSphericalHarmo.hpp", "max_issues_repo_name": "bbercovici/SBGAT", "max_issues_repo_head_hexsha": "93e935baff49eb742470d7d593931f0573f0c062", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 34.0, "max_issues_repo_issues_event_min_datetime": "2017-02-09T15:38:35.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-25T20:53:37.000Z", "max_forks_repo_path": "SbgatCore/include/SbgatCore/SBGATSphericalHarmo.hpp", "max_forks_repo_name": "bbercovici/SBGAT", "max_forks_repo_head_hexsha": "93e935baff49eb742470d7d593931f0573f0c062", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-03-12T12:20:25.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-12T12:20:25.000Z", "avg_line_length": 33.1370716511, "max_line_length": 289, "alphanum_fraction": 0.6935226098, "num_tokens": 3164, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8577681086260461, "lm_q2_score": 0.5813030906443133, "lm_q1q2_score": 0.49862325260044765}} {"text": "#include \n#include \n#include \n#include \"larsson_iccv19.h\"\n#include \"kukelova_iccv13.h\"\n#include \"../misc/univariate.h\"\n#include \"../misc/distortion.h\"\n\nusing namespace Eigen;\nusing namespace radialpose;\nusing std::complex;\n\nstatic const double SMALL_NUMBER = 1e-8;\nstatic const double DAMP_FACTOR = 1e-8;\n\n\n\nstatic void linsolve_known_pose_dist(const Points2D &x, const Points3D &X, double t3, int Np, int Nd, double damp, Camera* camera)\n{\n\tint n_pts = x.cols();\n\tint n_param = 1 + Np + Nd;\n\n\tint n_rows = n_pts;\n\tif (damp > 0)\n\t\tn_rows += n_param - 1;\n\n\t// System matrix for normal equations\n\tEigen::Matrix A(n_rows, n_param);\n\tEigen::Matrix b(n_rows, 1);\n\tA.setZero(); b.setZero();\n\n\tdouble r_pow[4];\n\tfor (int i = 0; i < n_pts; i++) {\n\n\t\tdouble uu, uv;\n\t\tif (std::abs(x(0, i)) < SMALL_NUMBER) {\n\t\t\tuv = x(1, i);\n\t\t\tuu = X(1, i) / (t3 + X(2, i));\n\t\t}\n\t\telse {\n\t\t\tuv = x(0, i);\n\t\t\tuu = X(0, i) / (t3 + X(2, i));\n\t\t}\n\n\t\t//double rd2 = x(0, i) * x(0, i) + x(1, i) * x(1, i);\n\t\tdouble ru2 = (X(0, i) * X(0, i) + X(1, i) * X(1, i)) / ((X(2, i) + t3) * (X(2, i) + t3));\t\t\t\t\n\n\t\t// compute powers\n\t\tr_pow[0] = ru2; // r^2\n\t\tr_pow[1] = ru2 * ru2; // r^4\n\t\tr_pow[2] = ru2 * r_pow[1]; // r^6\n\t\tr_pow[3] = ru2 * r_pow[2]; // r^8\n\n\t\tA(i, 0) = uu;\n\t\tfor (int k = 0; k < Np; k++)\n\t\t\tA(i, 1 + k) = r_pow[k] * uu;\n\t\tfor (int k = 0; k < Nd; k++)\n\t\t\tA(i, 1 + Np + k) = -uv * r_pow[k];\n\t\tb(i) = uv;\n\t}\n\tif (damp > 0) {\n\t\tfor (int i = 1; i < n_param; i++)\n\t\t\tA(n_pts - 1 + i, i) += damp;\n\t}\n\n\t//Eigen::JacobiSVD> svd(A);\t\n\t//std::cout << \"A: \" << A << \"\\n\";\n\t//std::cout << \"svd(A): \" << svd.singularValues() << \"\\n\";\n\n\tEigen::Matrix sol = A.fullPivHouseholderQr().solve(b);\n\tcamera->focal = sol(0);\n\tfor (int i = 1; i < 
n_param; i++) {\n\t\tif (i <= Np)\n\t\t\tcamera->dist_params.push_back(sol(i) / camera->focal);\n\t\telse\n\t\t\tcamera->dist_params.push_back(sol(i));\n\t}\n}\n\nstatic void linsolve_known_pose_undist(const Points2D &x, const Points3D &X, double t3, int Np, int Nd, double damp, Camera* camera)\n{\n\tint n_pts = x.cols();\n\tint n_param = 1 + Np + Nd;\n\n\tint n_rows = n_pts;\n\tif (damp > 0)\n\t\tn_rows += n_param - 1;\n\n\t// System matrix for normal equations\n\tEigen::Matrix A(n_rows, n_param);\n\tEigen::Matrix b(n_rows, 1);\n\tA.setZero(); b.setZero();\n\n\tdouble r_pow[4];\n\tfor (int i = 0; i < n_pts; i++) {\n\n\t\tdouble uu, uv;\n\t\tif (std::abs(x(0, i)) < SMALL_NUMBER) {\n\t\t\tuv = x(1, i);\n\t\t\tuu = X(1, i) / (t3 + X(2, i));\n\t\t} else {\n\t\t\tuv = x(0, i);\n\t\t\tuu = X(0, i) / (t3 + X(2, i));\n\t\t}\n\n\t\tdouble rd2 = x(0, i) * x(0, i) + x(1, i) * x(1, i);\t\t\n\t\t\t\n\t\t// compute powers\n\t\tr_pow[0] = rd2; // r^2\n\t\tr_pow[1] = rd2 * rd2; // r^4\n\t\tr_pow[2] = rd2 * r_pow[1]; // r^6\n\t\tr_pow[3] = rd2 * r_pow[2]; // r^8\n\n\t\tA(i, 0) = uu;\n\t\tfor (int k = 0; k < Np; k++)\n\t\t\tA(i, 1 + k) = -r_pow[k] * uv;\n\t\tfor (int k = 0; k < Nd; k++)\n\t\t\tA(i, 1 + Np + k) = uu * r_pow[k];\n\t\tb(i) = uv;\n\t}\n\tif (damp > 0) {\n\t\tfor (int i = 1; i < n_param; i++)\n\t\t\tA(n_pts - 1 + i, i) += damp;\n\t}\n\n\n\tEigen::Matrix sol = A.fullPivHouseholderQr().solve(b);\n\tcamera->focal = sol(0);\n\tdouble f2 = camera->focal * camera->focal;\n\tdouble f2k = f2;\n\t// We have the real mu_k = sol * f^2k and lambda_k = sol * f^2k-1\n\tfor (int i = 1; i < n_param; i++) {\n\t\tif (i == Np+1)\n\t\t\tf2k = camera->focal;\n\t\tcamera->dist_params.push_back(sol(i) * f2k);\n\t\tf2k = f2k * f2;\n\t}\n}\n\n\n// Small refinement on the minimal sample. 
TODO: Move this to refinement.cc\ntemplate\nvoid radial_refinement_dist(Camera &p, const Points2D &x, const Points3D &X, double damp_factor) {\n\t// It is assumed that X is already rotated by p\n\tconstexpr int n_pts = std::max(2 + Np + Nd, 5);\n\n\tMatrix J;\n\tMatrix res;\n\tMatrix dx;\n\n\tfor (int i = 0; i < Np; ++i)\n\t\tp.dist_params[i] *= p.focal;\n\n\tfor (int iter = 0; iter < 5; ++iter) {\n\t\tfor (int i = 0; i < n_pts; ++i) {\n\t\t\tdouble d = X(2, i) + p.t(2);\n\t\t\tdouble d2 = d * d;\n\t\t\tdouble r2 = X.block<2, 1>(0, i).squaredNorm();\n\n\t\t\tdouble num = p.focal;\n\t\t\tdouble denom = d;\n\n\t\t\tdouble dnum_dt = 0.0;\n\t\t\tdouble ddenom_dt = 1.0;\n\n\n\t\t\tfor (int k = 0; k < Np; k++) {\n\t\t\t\tdouble r2d2 = std::pow(r2 / d2, k + 1);\n\n\t\t\t\tnum += p.dist_params[k] * r2d2;\n\t\t\t\tdnum_dt += - p.dist_params[k] * 2 * (k + 1) * r2d2 / d;\n\n\t\t\t}\n\t\t\tfor (int k = 0; k < Nd; k++) {\n\t\t\t\tdouble r2d2 = std::pow(r2 / d2, k + 1);\n\n\t\t\t\tdenom += d * p.dist_params[Np + k] * r2d2;\n\t\t\t\tddenom_dt += -p.dist_params[Np + k] * (2 * k + 1)*r2d2;\n\t\t\t}\n\n\t\t\tdouble factor = num / denom;\n\t\t\tdouble dfactor_dt = (dnum_dt*denom - num * ddenom_dt) / (denom*denom);\n\t\t\tdouble dfactor_df = 1 / denom;\n\n\t\t\tres(2 * i + 0) = factor * X(0, i) - x(0, i);\n\t\t\tres(2 * i + 1) = factor * X(1, i) - x(1, i);\n\n\t\t\tJ(2 * i + 0, 0) = dfactor_dt * X(0, i);\n\t\t\tJ(2 * i + 1, 0) = dfactor_dt * X(1, i);\n\t\t\tJ(2 * i + 0, 1) = dfactor_df * X(0, i);\n\t\t\tJ(2 * i + 1, 1) = dfactor_df * X(1, i);\n\n\t\t\tfor (int k = 0; k < Np; ++k) {\n\t\t\t\tdouble r2d2 = std::pow(r2 / d2, k + 1);\n\t\t\t\tdouble dfactor_dmu = r2d2 / denom;\n\n\t\t\t\tJ(2 * i + 0, 2 + k) = dfactor_dmu * X(0, i);\n\t\t\t\tJ(2 * i + 1, 2 + k) = dfactor_dmu * X(1, i);\n\t\t\t}\n\n\t\t\tfor (int k = 0; k < Nd; ++k) {\n\t\t\t\tdouble r2d2 = std::pow(r2 / d2, k + 1);\n\t\t\t\tdouble dfactor_dlambda = -d * r2d2 * num / (denom*denom);\n\t\t\t\tJ(2 * i + 0, 2 + Np + k) = 
dfactor_dlambda * X(0, i);\n\t\t\t\tJ(2 * i + 1, 2 + Np + k) = dfactor_dlambda * X(1, i);\n\t\t\t}\n\n\t\t}\n\n\t\tif (res.norm() < SMALL_NUMBER)\n\t\t\tbreak;\n\n\t\t//std::cout << \"res = \" << res << \"\\n\";\n\t\t//std::cout << \"jac = \" << J << \"\\n\";\n\n\t\tMatrix H = J.transpose()*J;\n\t\tH.diagonal().array() += 1e-6; // LM dampening\n\t\tMatrix g = -J.transpose()*res;\n\n\t\tif (Nd > 0 && Np > 0 && damp_factor > 0) {\n\t\t\t// For rational models we add a small dampening factor\t\t\t\n\t\t\tH.template block(2, 2).diagonal().array() += damp_factor;\n\t\t\tfor(int i = 0; i < Np+Nd; i++)\n\t\t\t\tg(2+i) -= damp_factor * p.dist_params[i];\n\t\t}\n\n\n\t\tdx = H.ldlt().solve(g);\n\n\t\tp.t(2) += dx(0);\n\t\tp.focal += dx(1);\n\t\tfor (int i = 0; i < Np; ++i)\n\t\t\tp.dist_params[i] += dx(2 + i);\n\t\tfor (int i = 0; i < Nd; ++i)\n\t\t\tp.dist_params[i + Np] += dx(Np + 2 + i);\n\n\t\tif (dx.array().abs().maxCoeff() < SMALL_NUMBER)\n\t\t\tbreak;\n\t}\n\n\tfor (int i = 0; i < Np; ++i)\n\t\tp.dist_params[i] /= p.focal;\n}\n\n\ntemplate\nvoid radial_refinement_undist(Camera &p, const Points2D &x, const Points3D &X, double damp_factor) {\n\t// It is assumed that X is already rotated by p\n\tconstexpr int n_pts = std::max(2 + Np + Nd, 5);\n\n\tMatrix J;\n\tMatrix res;\n\tMatrix dx;\n\n\t// Change of variables\n\tdouble f2 = p.focal * p.focal;\n\tdouble f2k = f2;\n\tfor (int i = 0; i < Np; ++i) {\n\t\tp.dist_params[i] /= f2k;\n\t\tf2k *= f2;\n\t}\n\tf2k = p.focal;\n\tfor (int i = 0; i < Nd; ++i) {\n\t\tp.dist_params[Np + i] /= f2k;\n\t\tf2k *= f2;\n\t}\n\n\tdouble r_pow[3];\n\n\tfor (int iter = 0; iter < 5; ++iter) {\n\t\tfor (int i = 0; i < n_pts; ++i) {\n\t\t\tdouble d = X(2, i) + p.t(2);\n\t\t\tdouble r2 = x.col(i).squaredNorm();\n\n\t\t\tdouble num = 1.0;\n\t\t\tdouble denom = p.focal;\n\n\t\t\tr_pow[0] = r2; // 2\n\t\t\tr_pow[1] = r2 * r2; // 4\n\t\t\tr_pow[2] = r2 * r_pow[1]; // 6\n\n\n\t\t\tfor (int k = 0; k < Np; k++) {\n\t\t\t\tnum += p.dist_params[k] * 
r_pow[k];\n\t\t\t}\n\t\t\tfor (int k = 0; k < Nd; k++) {\n\t\t\t\tdenom += p.dist_params[Np + k] * r_pow[k];\n\t\t\t}\n\n\t\t\tdouble factor = num / denom;\n\n\t\t\tdouble dfactor_df = -factor / denom;\n\n\t\t\tres(2 * i + 0) = factor * x(0, i) - X(0, i) / d;\n\t\t\tres(2 * i + 1) = factor * x(1, i) - X(1, i) / d;\n\n\t\t\tJ(2 * i + 0, 0) = X(0, i) / (d*d); // t\n\t\t\tJ(2 * i + 1, 0) = X(1, i) / (d*d);\n\t\t\tJ(2 * i + 0, 1) = dfactor_df * x(0, i);\n\t\t\tJ(2 * i + 1, 1) = dfactor_df * x(1, i);\n\n\t\t\tfor (int k = 0; k < Np; ++k) {\n\t\t\t\tdouble dfactor_dmu = r_pow[k] / denom;\n\t\t\t\tJ(2 * i + 0, 2 + k) = dfactor_dmu * x(0, i);\n\t\t\t\tJ(2 * i + 1, 2 + k) = dfactor_dmu * x(1, i);\n\t\t\t}\n\t\t\tfor (int k = 0; k < Nd; ++k) {\n\t\t\t\tdouble dfactor_dlambda = -r_pow[k] * num / (denom*denom);\n\t\t\t\tJ(2 * i + 0, 2 + Np + k) = dfactor_dlambda * x(0, i);\n\t\t\t\tJ(2 * i + 1, 2 + Np + k) = dfactor_dlambda * x(1, i);\n\t\t\t}\n\t\t}\n\n\t\t//std::cout << \"res = \" << res << \"\\n\";\n\t\t//std::cout << \"jac = \" << J << \"\\n\";\n\t\tif (res.norm() < SMALL_NUMBER)\n\t\t\tbreak;\n\n\t\tMatrix H = J.transpose()*J;\n\t\tH.diagonal().array() += 1e-6; // LM dampening\n\t\tMatrix g = -J.transpose()*res;\n\n\t\tif (Nd > 0 && Np > 0 && damp_factor > 0) {\n\t\t\t// For rational models we add a small dampening factor\n\t\t\tH.template block(2, 2).diagonal().array() += damp_factor;\n\t\t\tfor (int i = 0; i < Np + Nd; i++)\n\t\t\t\tg(2 + i) -= damp_factor * p.dist_params[i];\n\t\t}\n\n\n\t\tdx = H.ldlt().solve(g);\n\n\t\tp.t(2) += dx(0);\n\t\tp.focal += dx(1);\n\t\tfor (int i = 0; i < Np; ++i)\n\t\t\tp.dist_params[i] += dx(2 + i);\n\t\tfor (int i = 0; i < Nd; ++i)\n\t\t\tp.dist_params[i + Np] += dx(Np + 2 + i);\n\n\t\tif (dx.array().abs().maxCoeff() < SMALL_NUMBER)\n\t\t\tbreak;\n\t}\n\n\t// Revert change of variables\n\tf2 = p.focal * p.focal;\n\tf2k = f2;\n\tfor (int i = 0; i < Np; ++i) {\n\t\tp.dist_params[i] *= f2k;\n\t\tf2k *= f2;\n\t}\n\tf2k = p.focal;\n\tfor 
(int i = 0; i < Nd; ++i) {\n\t\tp.dist_params[Np + i] *= f2k;\n\t\tf2k *= f2;\n\t}\n}\n\ninline double simple_preconditioner(const Points2D &x, const Points3D &X) {\n\t// Simple preconditioner using the first two points\n\tdouble nx1 = x.col(0).squaredNorm();\n\tdouble nx2 = x.col(1).squaredNorm();\n\tdouble sx1 = X.block<2, 1>(0, 0).dot(x.col(0));\n\tdouble sx2 = X.block<2, 1>(0, 1).dot(x.col(1));\n\treturn (X(2, 1)*nx2*sx1 - X(2, 0)*nx1*sx2) / (nx1 * sx2 - nx2 * sx1);\n}\n\ntemplate\nint radialpose::larsson_iccv19::Solver::solve(const Points2D& image_points, const Points3D& world_points, std::vector* poses) const\n{\n\n\tstd::vector initial_poses;\n\tstd::vector t3;\n\n\tif (use_radial_solver) {\n\t\tkukelova_iccv13::Radial1DSolver::p5p_radial_impl(image_points, world_points, &initial_poses);\n\t} else {\n\t\tinitial_poses.push_back(Camera(Matrix3d::Identity(), Vector3d::Zero()));\n\t}\n\tMatrix X;\n\n\tfor (int k = 0; k < initial_poses.size(); k++) {\n\t\tt3.clear();\n\n\t\tX = initial_poses[k].R * world_points;\n\t\tX.colwise() += initial_poses[k].t;\n\n\t\t//std::cout << \"Initial pose \" << k + 1 << \"/\" << initial_poses.size() << \"\\n\";\n\t\t//std::cout << \"R=\" << initial_poses[k].R << \"\\nt=\" << initial_poses[k].t << \"\\n\";\n\t\t//std::cout << \"X=\" << X << \"\\n\";\n\n\t\tdouble t0 = 0;\n\t\tif (use_precond) {\n\t\t\tt0 = simple_preconditioner(image_points, X);\n\t\t\tX.row(2).array() += t0;\n\t\t}\n\n\t\tsolver_impl(image_points, X, &t3);\n\n\t\tfor (int i = 0; i < t3.size(); ++i) {\n\t\t\tCamera pose;\n\t\t\tpose.R = initial_poses[k].R;\n\t\t\tpose.t = initial_poses[k].t;\n\t\t\tpose.t(2) = t3[i];\n\n\t\t\tif (DistortionModel) {\n\t\t\t\tlinsolve_known_pose_dist(image_points, X, t3[i], Np, Nd, damp_factor, &pose);\n\t\t\t\tif(root_refinement)\n\t\t\t\t\tradial_refinement_dist(pose, image_points, X, damp_factor);\n\t\t\t} else {\n\t\t\t\tlinsolve_known_pose_undist(image_points, X, t3[i], Np, Nd, damp_factor, 
&pose);\n\t\t\t\tif(root_refinement)\n\t\t\t\t\tradial_refinement_undist(pose, image_points, X, damp_factor);\n\t\t\t}\t\t\t\n\n\t\t\tif (pose.focal < 0) {\n\t\t\t\t// flipped solution\n\t\t\t\tpose.focal = -pose.focal;\n\t\t\t\tpose.R.row(0) = -pose.R.row(0);\n\t\t\t\tpose.R.row(1) = -pose.R.row(1);\n\t\t\t\tpose.t(0) = -pose.t(0);\n\t\t\t\tpose.t(1) = -pose.t(1);\n\t\t\t}\n\n\t\t\t\n\t\t\t// Revert precond\n\t\t\tpose.t(2) += t0;\n\n\t\t\t//std::cout << \"solution[\" << i << \"], t3=\" << pose.t(2) << \"\\n\";\n\t\t\tposes->push_back(pose);\n\t\t}\n\t}\n\treturn poses->size();\n}\n\n\n\n\n// Template instantiations\ntemplate class radialpose::larsson_iccv19::Solver<1, 0, true>;\ntemplate class radialpose::larsson_iccv19::Solver<2, 0, true>;\ntemplate class radialpose::larsson_iccv19::Solver<3, 0, true>;\ntemplate class radialpose::larsson_iccv19::Solver<3, 3, true>;\ntemplate class radialpose::larsson_iccv19::Solver<1, 0, false>;\n//template class radialpose::larsson_iccv19::Solver<2, 0, false>;\n//template class radialpose::larsson_iccv19::Solver<3, 0, false>;\n//template class radialpose::larsson_iccv19::Solver<3, 3, false>;\n\ntemplate class radialpose::PoseEstimator>;\ntemplate class radialpose::PoseEstimator>;\ntemplate class radialpose::PoseEstimator>;\ntemplate class radialpose::PoseEstimator>;\ntemplate class radialpose::PoseEstimator>;\n\n/*\n This is broken?\n// These are implemented in larsson_iccv19_impl.cc\nextern template int radialpose::larsson_iccv19::Solver<1, 0, true>::solver_impl(Eigen::Matrix, Eigen::Matrix, std::vector*);\nextern template int radialpose::larsson_iccv19::Solver<2, 0, true>::solver_impl(Eigen::Matrix, Eigen::Matrix, std::vector*);\nextern template int radialpose::larsson_iccv19::Solver<3, 0, true>::solver_impl(Eigen::Matrix, Eigen::Matrix, std::vector*);\nextern template int radialpose::larsson_iccv19::Solver<3, 3, true>::solver_impl(Eigen::Matrix, Eigen::Matrix, std::vector*);\nextern template int 
radialpose::larsson_iccv19::Solver<1, 0, false>::solver_impl(Eigen::Matrix, Eigen::Matrix, std::vector*);\n*/", "meta": {"hexsha": "b8bbcf263066dc2af85246adcb336c2c6602d03f", "size": 13757, "ext": "cc", "lang": "C++", "max_stars_repo_path": "solvers/larsson_iccv19.cc", "max_stars_repo_name": "vlarsson/radialpose", "max_stars_repo_head_hexsha": "e620fc208f573820ade6a6fe321731d0f3eb082d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 34.0, "max_stars_repo_stars_event_min_datetime": "2019-10-29T02:48:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T20:28:29.000Z", "max_issues_repo_path": "solvers/larsson_iccv19.cc", "max_issues_repo_name": "vlarsson/radialpose", "max_issues_repo_head_hexsha": "e620fc208f573820ade6a6fe321731d0f3eb082d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-05-31T16:16:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-31T19:39:41.000Z", "max_forks_repo_path": "solvers/larsson_iccv19.cc", "max_forks_repo_name": "vlarsson/radialpose", "max_forks_repo_head_hexsha": "e620fc208f573820ade6a6fe321731d0f3eb082d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2019-11-04T21:38:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T20:41:11.000Z", "avg_line_length": 29.7770562771, "max_line_length": 187, "alphanum_fraction": 0.5750527004, "num_tokens": 5386, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8577680904463333, "lm_q2_score": 0.5813030906443134, "lm_q1q2_score": 0.4986232420325245}} {"text": "#include \n#include \n\nusing namespace Eigen;\nusing namespace std;\n\nint main() {\n ArrayXXf m(2, 2);\n\n // assign some values coefficient by coefficient\n m(0, 0) = 1.0;\n m(0, 1) = 2.0;\n m(1, 0) = 3.0;\n m(1, 1) = m(0, 1) + m(1, 0);\n\n // print values to standard output\n cout << m << endl << endl;\n\n // using the comma-initializer is also allowed\n m << 1.0, 2.0,\n 3.0, 4.0;\n\n // print values to standard output\n cout << m << endl;\n}\n", "meta": {"hexsha": "701a6b875c1cb571326f19b756102e5081a58a00", "size": 467, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "include/Eigen-3.3/doc/examples/Tutorial_ArrayClass_accessors.cpp", "max_stars_repo_name": "chen0510566/CarND-Path-Planning-Project", "max_stars_repo_head_hexsha": "4652e5c459980252e4ab72a0fd687341f3245466", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/Eigen-3.3/doc/examples/Tutorial_ArrayClass_accessors.cpp", "max_issues_repo_name": "chen0510566/CarND-Path-Planning-Project", "max_issues_repo_head_hexsha": "4652e5c459980252e4ab72a0fd687341f3245466", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/Eigen-3.3/doc/examples/Tutorial_ArrayClass_accessors.cpp", "max_forks_repo_name": "chen0510566/CarND-Path-Planning-Project", "max_forks_repo_head_hexsha": "4652e5c459980252e4ab72a0fd687341f3245466", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.9615384615, "max_line_length": 50, "alphanum_fraction": 0.5824411135, "num_tokens": 176, "lm_name": 
"Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES\n\n", "lm_q1_score": 0.6791786861878392, "lm_q2_score": 0.7341195385342971, "lm_q1q2_score": 0.4985983436865467}} {"text": "/*!\n \\file gpp_hyperparameter_optimization_demo.cpp\n \\rst\n ``moe/optimal_learning/cpp/gpp_hyperparameter_optimization_demo.cpp``\n\n This is a demo for the model selection (via hyperparameter optimization) capability\n present in this project. These capabilities live in\n gpp_model_selection.\n\n In gpp_expected_improvement_demo, we choose the hyperparameters arbitrarily. Here,\n we will walk through an example of how one would select hyperparameters for a given\n class of covariance function; here, SquareExponential will do. This demo supports:\n\n 1. User-specified training data\n 2. Randomly generated training data (more automatic)\n\n More details on the second case:\n\n 1. Choose a set of hyperparameters randomly: source covariance\n 2. Build a fake\\* training set by drawing from a GP with source covariance, at randomly\n chosen locations\n \\* By defining OL_USER_INPUTS to 1, you can specify your own input data.\n 3. Choose a new random set of hyperparameters and run hyperparameter optimization\n\n a. Show log likelihood using the optimized hyperparameters AND the source hyperparameters\n b. observe that with larger training sets, the optimized hyperparameters converge\n to the source values; but in smaller sets other optima may exist\n\n Further notes about [newton] optimization performance and robustness are spread throughout the\n demo code, placed near the function call/object construction that they are relevant to.\n\n Please read and understand gpp_expected_improvement_demo.cpp before going through\n this example. 
In addition, understanding gpp_model_selection.hpp's\n file comments (as well as cpp for devs) is prerequisite.\n\\endrst*/\n\n#include \n\n#include \n\n#include // NOLINT(build/include_order)\n\n#include \"gpp_common.hpp\"\n#include \"gpp_covariance.hpp\"\n#include \"gpp_domain.hpp\"\n#include \"gpp_logging.hpp\"\n#include \"gpp_math.hpp\"\n#include \"gpp_model_selection.hpp\"\n#include \"gpp_optimizer_parameters.hpp\"\n#include \"gpp_random.hpp\"\n#include \"gpp_test_utils.hpp\"\n\n#define OL_USER_INPUTS 0\n\nusing namespace optimal_learning; // NOLINT, i'm lazy in this file which has no external linkage anyway\n\nint main() {\n using DomainType = TensorProductDomain;\n using HyperparameterDomainType = TensorProductDomain;\n // here we set some configurable parameters\n // feel free to change them (and recompile) as you explore\n // comments next to each parameter will indicate its purpose and domain\n\n // the \"spatial\" dimension, aka the number of independent (experiment) parameters\n // i.e., this is the dimension of the points in points_sampled\n static const int dim = 3; // > 0\n\n // number of points that we have already sampled; i.e., size of the training set\n static const int num_sampled = 100; // >= 0\n // observe that as num_sampled increases, the optimal set of hyperparameters (via optimization) will approach\n // the set used to generate the input data (in the case of generating inputs randomly from a GP). 
Don't try overly\n // large values or it will be slow; for reference 500 samples takes ~2-3 min on my laptop whereas 100 samples takes ~1s\n\n // the log likelihoods will also decrease in value since by adding more samples, we are more greatly restricting the GP\n // into ever-narrower sets of likely realizations\n\n UniformRandomGenerator uniform_generator(314); // repeatable results\n // construct with (base_seed, thread_id) to generate a 'random' seed\n\n // specifies the domain of each independent variable in (min, max) pairs\n // set appropriately for user-specified inputs\n // mostly irrelevant for randomly generated inputs\n std::vector domain_bounds = {\n {-1.5, 2.3}, // first dimension\n {0.1, 3.1}, // second dimension\n {1.7, 2.9}}; // third dimension\n DomainType domain(domain_bounds.data(), dim);\n\n // now we allocate point sets; ALL POINTS MUST LIE INSIDE THE DOMAIN!\n std::vector points_sampled(num_sampled*dim);\n\n std::vector points_sampled_value(num_sampled);\n\n // default to 0 noise\n std::vector noise_variance(num_sampled, 0.0); // each entry must be >= 0.0\n // choosing too much noise makes little sense: cannot make useful predicitions if data\n // is drowned out by noise\n // choosing 0 noise is dangerous for large problems; the covariance matrix becomes very\n // ill-conditioned, and adding noise caps the maximum condition number at roughly\n // 1.0/min(noise_variance)\n\n // covariance selection\n using CovarianceClass = SquareExponential; // see gpp_covariance.hpp for other options\n\n // arbitrary hyperparameters used to generate data\n std::vector hyperparameters_original(1 + dim);\n CovarianceClass covariance_original(dim, 1.0, 1.0);\n // CovarianceClass provides SetHyperparameters, GetHyperparameters to read/modify\n // hyperparameters later on\n // Generate hyperparameters randomly\n boost::uniform_real uniform_double_for_hyperparameter(0.5, 1.5);\n FillRandomCovarianceHyperparameters(uniform_double_for_hyperparameter, 
&uniform_generator,\n &hyperparameters_original, &covariance_original);\n\n std::vector hyperparameter_domain_bounds(covariance_original.GetNumberOfHyperparameters(), {1.0e-10, 1.0e10});\n HyperparameterDomainType hyperparameter_domain(hyperparameter_domain_bounds.data(),\n covariance_original.GetNumberOfHyperparameters());\n\n // now fill data\n#if OL_USER_INPUTS == 1\n // if you prefer, insert your own data here\n // requirements aka variables that must be set:\n // noise variance, num_sampled values: defaulted to 0; need to set this for larger data sets to deal with conditioning\n // points_sampled, num_sampled*dim values: the locations of already-sampled points; must be INSIDE the domain\n // points_sampled_value, num_sampled values: the function values at the already-sampled points\n // covariance_perturbed: a CovarianceClass object constructed with perturbed (from covariance_original) hyperparameters;\n // must have decltype(covariance_perturbed) == decltype(covariance_original) for hyperparameter opt to make any sense\n\n // NOTE: the GP is 0-mean, so shift your points_sampled_value entries accordingly\n // e.g., if the samples are from a function with mean M, subtract it out\n#else\n // generate GP inputs randomly\n\n // set noise\n std::fill(noise_variance.begin(), noise_variance.end(), 1.0e-1); // arbitrary choice\n\n // use latin hypercube sampling to get a reasonable distribution of training point locations\n domain.GenerateUniformPointsInDomain(num_sampled, &uniform_generator, points_sampled.data());\n\n // build an empty GP: since num_sampled (last arg) is 0, none of the data arrays will be used here\n GaussianProcess gp_generator(covariance_original, points_sampled.data(), points_sampled_value.data(),\n noise_variance.data(), dim, 0);\n // fill the GP with randomly generated data\n FillRandomGaussianProcess(points_sampled.data(), noise_variance.data(), dim, num_sampled,\n points_sampled_value.data(), &gp_generator);\n\n // choose a random initial 
guess reasonably far away from hyperparameters_original\n // to find some optima (see WARNING2 below), it may be necessary to start with hyperparameters smaller than the originals or\n // of similar magnitude\n std::vector hyperparameters_perturbed(covariance_original.GetNumberOfHyperparameters());\n CovarianceClass covariance_perturbed(dim, 1.0, 1.0);\n boost::uniform_real uniform_double_for_wrong_hyperparameter(5.0, 12.0);\n FillRandomCovarianceHyperparameters(uniform_double_for_wrong_hyperparameter, &uniform_generator,\n &hyperparameters_perturbed, &covariance_perturbed);\n#endif\n\n // log likelihood type selection\n using LogLikelihoodEvaluator = LogMarginalLikelihoodEvaluator;\n // log likelihood evaluator object\n LogLikelihoodEvaluator log_marginal_eval(points_sampled.data(), points_sampled_value.data(),\n noise_variance.data(), dim, num_sampled);\n\n int total_newton_errors = 0; // number of newton runs that failed due to singular hessians\n int newton_max_num_steps = 500; // max number of newton steps\n double gamma_newton = 1.05; // newton diagonal dominance scale-down factor (see newton docs for details)\n double pre_mult_newton = 1.0e-1; // newton diagonal dominance scaling factor (see newton docs for details)\n double max_relative_change_newton = 1.0;\n double tolerance_newton = 1.0e-11;\n NewtonParameters newton_parameters(1, newton_max_num_steps, gamma_newton, pre_mult_newton,\n max_relative_change_newton, tolerance_newton);\n\n // call newton to optimize hyperparameters\n // in general if this takes the full hyperparameter_max_num_steps iterations, something went wrong\n // newton's solution:\n std::vector new_newton_hyperparameters(covariance_original.GetNumberOfHyperparameters());\n\n printf(OL_ANSI_COLOR_CYAN \"ORIGINAL HYPERPARMETERS:\\n\" OL_ANSI_COLOR_RESET);\n printf(\"Original Hyperparameters:\\n\");\n PrintMatrix(hyperparameters_original.data(), 1, covariance_original.GetNumberOfHyperparameters());\n\n printf(OL_ANSI_COLOR_CYAN \"NEWTON 
OPTIMIZED HYPERPARAMETERS:\\n\" OL_ANSI_COLOR_RESET);\n // run newton optimization\n total_newton_errors += NewtonHyperparameterOptimization(log_marginal_eval, covariance_perturbed,\n newton_parameters, hyperparameter_domain,\n new_newton_hyperparameters.data());\n // WARNING: the gradient of log marginal appears to go to 0 as you move toward infinity. if you do not start\n // close enough to an optima or have overly aggressive diagonal dominance settings, newton will skip miss everything\n // going on locally and shoot out to these solutions.\n // Having hyperparameters = 1.0e10 is nonsense, and usually this problem is further signaled by a log marginal likelihood\n // that is POSITIVE (impossible since p \\in [0,1], so log(p) \\in (-\\infty, 0])\n\n // Long-term, we should solve this problem by multistarting newton. Additionally there will be some kind of \"quick kill\"\n // mechanism needed--when newton is wandering down the wrong path (or to an already-known solution?) we should detect it\n // and kill it quickly to keep cost low.\n // For now, just play around with different initial conditions or more conservative gamam settings.\n\n // WARNING2: for small num_sampled, it often appears that the solution becomes independent of one or more hyperparameters.\n // e.g., in 2D, we'd have an optimal \"ridge.\" Finding this robustly requires starting near it, so the random choice of\n // initial conditions can fail horribly in general.\n\n // WARNING3: if you choose large values of num_sampled (like 300), this can be quite slow; about 5min on my computer\n // sometimes the reason is that machine prescision prevents us from reaching the cutoff criterion:\n // norm_gradient_likelihood <= 1.0e-13 in NewtonHyperparameterOptimization() in gpp_model_selection...cpp\n // So you may need to relax this to 1.0e-10 or something so that we aren't just spinning wheels at almost-converged but\n // unable to actually move anywhere.\n\n printf(\"Result of newton:\\n\");\n 
PrintMatrix(new_newton_hyperparameters.data(), 1, covariance_original.GetNumberOfHyperparameters());\n\n if (total_newton_errors > 0) {\n printf(\"WARNING: %d newton runs exited due to singular Hessian matrices.\\n\", total_newton_errors);\n }\n\n printf(OL_ANSI_COLOR_CYAN \"LOG LIKELIHOOD + GRADIENT AT NEWTON OPTIMIZED FINAL HYPERPARAMS:\\n\" OL_ANSI_COLOR_RESET);\n\n CovarianceClass covariance_final(dim, new_newton_hyperparameters[0], new_newton_hyperparameters.data() + 1);\n typename LogLikelihoodEvaluator::StateType log_marginal_state_newton_optimized_hyper(log_marginal_eval,\n covariance_final);\n double newton_log_marginal_opt = log_marginal_eval.ComputeLogLikelihood(log_marginal_state_newton_optimized_hyper);\n printf(\"newton optimized log marginal likelihood = %.18E\\n\", newton_log_marginal_opt);\n\n std::vector grad_log_marginal_opt(covariance_final.GetNumberOfHyperparameters());\n log_marginal_eval.ComputeGradLogLikelihood(&log_marginal_state_newton_optimized_hyper,\n grad_log_marginal_opt.data());\n printf(\"grad log likelihood: \");\n PrintMatrix(grad_log_marginal_opt.data(), 1, covariance_final.GetNumberOfHyperparameters());\n\n printf(OL_ANSI_COLOR_CYAN \"LOG LIKELIHOOD + GRADIENT AT ORIGINAL HYPERPARAMS:\\n\" OL_ANSI_COLOR_RESET);\n typename LogLikelihoodEvaluator::StateType log_marginal_state_original_hyper(log_marginal_eval,\n covariance_original);\n\n double original_log_marginal = log_marginal_eval.ComputeLogLikelihood(log_marginal_state_original_hyper);\n printf(\"original log marginal likelihood = %.18E\\n\", original_log_marginal);\n\n std::vector original_grad_log_marginal(covariance_original.GetNumberOfHyperparameters());\n log_marginal_eval.ComputeGradLogLikelihood(&log_marginal_state_original_hyper,\n original_grad_log_marginal.data());\n printf(\"grad log likelihood: \");\n PrintMatrix(original_grad_log_marginal.data(), 1, covariance_original.GetNumberOfHyperparameters());\n\n return 0;\n} // end main\n", "meta": {"hexsha": 
"9b9b5302745b750137be039eae6967e81ca7bade", "size": 13625, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "moe/optimal_learning/cpp/gpp_hyperparameter_optimization_demo.cpp", "max_stars_repo_name": "dstoeckel/MOE", "max_stars_repo_head_hexsha": "5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 966.0, "max_stars_repo_stars_event_min_datetime": "2015-01-10T05:27:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T21:04:36.000Z", "max_issues_repo_path": "moe/optimal_learning/cpp/gpp_hyperparameter_optimization_demo.cpp", "max_issues_repo_name": "dstoeckel/MOE", "max_issues_repo_head_hexsha": "5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 46.0, "max_issues_repo_issues_event_min_datetime": "2015-01-16T22:33:08.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-04T16:33:27.000Z", "max_forks_repo_path": "moe/optimal_learning/cpp/gpp_hyperparameter_optimization_demo.cpp", "max_forks_repo_name": "dstoeckel/MOE", "max_forks_repo_head_hexsha": "5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 143.0, "max_forks_repo_forks_event_min_datetime": "2015-01-07T03:57:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T01:10:45.000Z", "avg_line_length": 56.0699588477, "max_line_length": 128, "alphanum_fraction": 0.7492844037, "num_tokens": 3129, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.6791786861878392, "lm_q2_score": 0.7341195269001831, "lm_q1q2_score": 0.4985983357849044}} {"text": "\n#include \"mex.h\"\n#include \n#include \"re3q3/re3q3.h\"\n\nvoid printUsage() {\n\tmexPrintf(\"[x] = re3q3(C);\\n\");\n\tmexPrintf(\" coefficient order is [ x^2, xy, xz, y^2, yz, z^2, x, y, z, 1.0 ]\\n\");\n}\n\nvoid mexFunction(int nlhs,mxArray *plhs[], int nrhs, const mxArray *prhs[]) {\n\n\tif (nrhs != 1) {\n\t\tprintUsage();\n\t\tmexErrMsgTxt(\"Please, specify 3 x 10N coefficient matrix\");\n\t}\n\tif (nlhs != 1) {\n\t\tprintUsage();\n\t\tmexErrMsgTxt(\"Require one output argument.\");\n\t}\n\t\n\tif ((mxGetM(prhs[0]) != 3) || (mxGetN(prhs[0]) % 10 != 0)) {\n\t\tprintUsage();\n\t\tmexErrMsgTxt(\"One input 3 x 10N matrix is required.\");\n\t}\n\n\tint n_instances = mxGetN(prhs[0]) / 10;\n\t\n\tdouble *data = mxGetPr(prhs[0]);\n\n\tEigen::Matrix coeffs;\n\tEigen::Matrix solutions;\n\n\tif (n_instances == 1) {\n\t\t// we are solving a single instance, output is 3xNsols\n\t\tcoeffs = Eigen::Map>(data);\n\n\t\tint n_sols = re3q3::re3q3(coeffs, &solutions);\n\t\tplhs[0] = mxCreateDoubleMatrix(3, n_sols, mxREAL);\n\n\t\tEigen::Map output_matrix = Eigen::Map(mxGetPr(plhs[0]), 3, n_sols);\n\t\toutput_matrix = solutions.block(0, 0, 3, n_sols);\n\t} else {\n\t\t// we are solving multiple instances, output is 3x8N\n\t\tplhs[0] = mxCreateDoubleMatrix(3, 8 * n_instances, mxREAL);\n\t\tdouble *output = mxGetPr(plhs[0]);\n\n\t\tEigen::Map output_matrix = Eigen::Map(mxGetPr(plhs[0]), 3, 8*n_instances);\n\t\toutput_matrix.setZero();\n\n\t\tfor (int i = 0; i < n_instances; i++) {\n\t\t\tcoeffs = Eigen::Map>(data + 30 * i);\n\t\t\tsolutions.setZero();\n\t\t\tint n_sols = re3q3::re3q3(coeffs, &solutions);\n\t\t\toutput_matrix.block(0, i * 8, 3, n_sols) = solutions.block(0,0,3,n_sols);\n\t\t}\n\t}\n}\n", "meta": {"hexsha": "ac7c3ccac9aa9a081e4cd6874208159bc600b8c7", "size": 1746, "ext": "cc", "lang": "C++", "max_stars_repo_path": "re3q3_mex.cc", 
"max_stars_repo_name": "vlarsson/re3q3", "max_stars_repo_head_hexsha": "ab03d271f0a30f516f052d750773b0277898751f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2020-03-26T09:30:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T10:14:01.000Z", "max_issues_repo_path": "re3q3_mex.cc", "max_issues_repo_name": "vlarsson/re3q3", "max_issues_repo_head_hexsha": "ab03d271f0a30f516f052d750773b0277898751f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "re3q3_mex.cc", "max_forks_repo_name": "vlarsson/re3q3", "max_forks_repo_head_hexsha": "ab03d271f0a30f516f052d750773b0277898751f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-07-18T06:19:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-18T06:19:02.000Z", "avg_line_length": 29.593220339, "max_line_length": 110, "alphanum_fraction": 0.6466208477, "num_tokens": 641, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7341195152660688, "lm_q2_score": 0.679178692681616, "lm_q1q2_score": 0.49859833265047027}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"split_edges.h\"\nusing namespace std;\n\nvoid split_edges_until_bound(Eigen::MatrixXd & V,Eigen::MatrixXi & F, Eigen::VectorXi & feature, Eigen::VectorXd & high, Eigen::VectorXd & low){\n \n using namespace Eigen;\n int m = F.rows();\n int n = V.rows();\n int num_feat = feature.size();\n std::vector> A;\n std::vector is_feature_vertex;\n is_feature_vertex.resize(n);\n Eigen::VectorXi is_feature_vertex_vec;\n is_feature_vertex_vec.setZero(n);\n igl::adjacency_list(F,A);\n Eigen::MatrixXi E,uE;\n Eigen::VectorXi EMAP;\n std::vector> uE2E;\n igl::unique_edge_map(F,E,uE,EMAP,uE2E);\n int k = uE.rows();\n //std::cout << \"Start split_edges_until_bound\" << std::endl;\n \n \n for (int s = 0; s < num_feat; s++) {\n is_feature_vertex[feature(s)] = true;\n // is_feature_vertex_vec(feature(s)) = 1;\n }\n \n bool keep_splitting = true;\n std::vector edges_to_split;\n \n while (keep_splitting) {\n //std::cout << \"A\" << std::endl;\n edges_to_split.resize(0);\n \n for (int i = 0; i < uE.rows(); i++) {\n //std::cout << \"B\" << std::endl;\n if (!is_feature_vertex[uE(i,0)] && !is_feature_vertex[uE(i,1)] && uE2E[i].size()==2) {\n //if (is_feature_vertex_vec(uE(i,0))==0 && is_feature_vertex_vec(uE(i,1))==0 && uE2E[i].size()==2) {\n if ( (V.row(uE(i,0))-V.row(uE(i,1))).norm()>((high(uE(i,0))+high(uE(i,1)))/2) ){\n edges_to_split.push_back(i);\n //std::cout << \"C\" << std::endl;\n }\n }\n }\n \n //std::cout << \"B\" << std::endl;\n \n //std::cout << \"D\" << std::endl;\n if(edges_to_split.size()==0){\n keep_splitting = false;\n }else{\n // SPLIT EDGES IN 
VECTOR edges_to_split\n //\n \n //std::cout << \"Before call to split_edges\" << std::endl;\n //std::cout << edges_to_split.size() << std::endl;\n split_edges(V,F,E,uE,EMAP,uE2E,high,low,edges_to_split);\n //igl::writeOBJ(\"test.obj\",V,F);\n //igl::unique_edge_map(F,E,uE,EMAP,uE2E);\n //std::cout << igl::is_edge_manifold(F) << std::endl;\n //std::cout << \"After call to split_edges\" << std::endl;\n \n }\n \n \n keep_splitting = false; // THIS IS A PATCH, NOT GOOD\n }\n \n \n}\n\n\n// g++ -I/usr/local/libigl/external/eigen -I/usr/local/libigl/include -std=c++11 -framework Accelerate main.cpp remesh_botsch.cpp -o main\n\n", "meta": {"hexsha": "5f08f87e66c35e3a28ef7f814b9229807676775e", "size": 3477, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/split_edges_until_bound.cpp", "max_stars_repo_name": "sgsellan/opening-and-closing-surfaces", "max_stars_repo_head_hexsha": "57127178c2e8d50396c02a853c4456a90e9220c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16.0, "max_stars_repo_stars_event_min_datetime": "2020-10-27T00:03:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T19:44:35.000Z", "max_issues_repo_path": "src/split_edges_until_bound.cpp", "max_issues_repo_name": "sgsellan/opening-and-closing-surfaces", "max_issues_repo_head_hexsha": "57127178c2e8d50396c02a853c4456a90e9220c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/split_edges_until_bound.cpp", "max_forks_repo_name": "sgsellan/opening-and-closing-surfaces", "max_forks_repo_head_hexsha": "57127178c2e8d50396c02a853c4456a90e9220c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2020-10-27T01:40:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-23T13:42:16.000Z", "avg_line_length": 33.7572815534, "max_line_length": 144, "alphanum_fraction": 0.5970664366, 
"num_tokens": 998, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7826624789529376, "lm_q2_score": 0.6370308082623217, "lm_q1q2_score": 0.4985801115639822}} {"text": "#include \n#include \n#include \n#include \n#include \n\nTEST(ProbDistributions, fvar_var) {\n using Eigen::Dynamic;\n using Eigen::Matrix;\n using stan::math::fvar;\n using stan::math::var;\n\n Matrix, Dynamic, 1> theta(3, 1);\n theta << 0.2, 0.3, 0.5;\n Matrix, Dynamic, 1> alpha(3, 1);\n alpha << 1.0, 1.0, 1.0;\n for (int i = 0; i < 3; i++) {\n theta(i).d_ = 1.0;\n alpha(i).d_ = 1.0;\n }\n\n EXPECT_FLOAT_EQ(0.6931472,\n stan::math::dirichlet_log(theta, alpha).val_.val());\n EXPECT_FLOAT_EQ(0.99344212, stan::math::dirichlet_log(theta, alpha).d_.val());\n\n Matrix, Dynamic, 1> theta2(4, 1);\n theta2 << 0.01, 0.01, 0.8, 0.18;\n Matrix, Dynamic, 1> alpha2(4, 1);\n alpha2 << 10.5, 11.5, 19.3, 5.1;\n for (int i = 0; i < 3; i++) {\n theta2(i).d_ = 1.0;\n alpha2(i).d_ = 1.0;\n }\n\n EXPECT_FLOAT_EQ(-43.40045,\n stan::math::dirichlet_log(theta2, alpha2).val_.val());\n EXPECT_FLOAT_EQ(2017.2858,\n stan::math::dirichlet_log(theta2, alpha2).d_.val());\n}\n\nTEST(ProbDistributions, fvar_varVectorised) {\n using Eigen::Dynamic;\n using Eigen::Matrix;\n using stan::math::dirichlet_log;\n using stan::math::fvar;\n using stan::math::var;\n\n Matrix, Dynamic, 1> theta1(3, 1), theta2(3, 1), theta3(3, 1);\n theta1 << 0.2, 0.3, 0.5;\n theta2 << 0.1, 0.8, 0.1;\n theta3 << 0.6, 0.1, 0.3;\n\n Matrix, Dynamic, 1> alpha1(3, 1), alpha2(3, 1), alpha3(3, 1);\n alpha1 << 1.0, 1.0, 1.0;\n alpha2 << 6.2, 3.5, 9.1;\n alpha3 << 2.5, 7.4, 6.1;\n\n std::vector, Dynamic, 1>> theta_vec(3);\n theta_vec[0] = theta1;\n theta_vec[1] = theta2;\n theta_vec[2] = theta3;\n\n std::vector, Dynamic, 1>> alpha_vec(3);\n alpha_vec[0] = alpha1;\n alpha_vec[1] = alpha2;\n alpha_vec[2] = alpha3;\n\n Matrix, Dynamic, 1> result(3);\n result[0] = dirichlet_log(theta1, alpha1);\n result[1] = dirichlet_log(theta2, alpha2);\n 
result[2] = dirichlet_log(theta3, alpha3);\n\n fvar out = dirichlet_log(theta_vec, alpha_vec);\n\n EXPECT_FLOAT_EQ(result.val().val().sum(), out.val_.val());\n EXPECT_FLOAT_EQ(result.d().val().sum(), out.d_.val());\n\n result[0] = dirichlet_log(theta1, alpha1);\n result[1] = dirichlet_log(theta2, alpha1);\n result[2] = dirichlet_log(theta3, alpha1);\n\n out = dirichlet_log(theta_vec, alpha1);\n\n EXPECT_FLOAT_EQ(result.val().val().sum(), out.val_.val());\n EXPECT_FLOAT_EQ(result.d().val().sum(), out.d_.val());\n\n result[0] = dirichlet_log(theta1, alpha1);\n result[1] = dirichlet_log(theta1, alpha2);\n result[2] = dirichlet_log(theta1, alpha3);\n\n out = dirichlet_log(theta1, alpha_vec);\n\n EXPECT_FLOAT_EQ(result.val().val().sum(), out.val_.val());\n EXPECT_FLOAT_EQ(result.d().val().sum(), out.d_.val());\n}\n\nTEST(ProbDistributions, fvar_fvar_var) {\n using Eigen::Dynamic;\n using Eigen::Matrix;\n using stan::math::fvar;\n using stan::math::var;\n\n Matrix>, Dynamic, 1> theta(3, 1);\n theta << 0.2, 0.3, 0.5;\n Matrix>, Dynamic, 1> alpha(3, 1);\n alpha << 1.0, 1.0, 1.0;\n for (int i = 0; i < 3; i++) {\n theta(i).d_ = 1.0;\n alpha(i).d_ = 1.0;\n }\n\n EXPECT_FLOAT_EQ(0.6931472,\n stan::math::dirichlet_log(theta, alpha).val_.val_.val());\n EXPECT_FLOAT_EQ(0.99344212,\n stan::math::dirichlet_log(theta, alpha).d_.val_.val());\n\n Matrix>, Dynamic, 1> theta2(4, 1);\n theta2 << 0.01, 0.01, 0.8, 0.18;\n Matrix>, Dynamic, 1> alpha2(4, 1);\n alpha2 << 10.5, 11.5, 19.3, 5.1;\n for (int i = 0; i < 3; i++) {\n theta2(i).d_ = 1.0;\n alpha2(i).d_ = 1.0;\n }\n\n EXPECT_FLOAT_EQ(-43.40045,\n stan::math::dirichlet_log(theta2, alpha2).val_.val_.val());\n EXPECT_FLOAT_EQ(2017.2858,\n stan::math::dirichlet_log(theta2, alpha2).d_.val_.val());\n}\n\nTEST(ProbDistributions, fvar_fvar_varVectorised) {\n using Eigen::Dynamic;\n using Eigen::Matrix;\n using stan::math::dirichlet_log;\n using stan::math::fvar;\n using stan::math::var;\n\n Matrix>, Dynamic, 1> theta1(3, 1), theta2(3, 1), 
theta3(3, 1);\n theta1 << 0.2, 0.3, 0.5;\n theta2 << 0.1, 0.8, 0.1;\n theta3 << 0.6, 0.1, 0.3;\n\n Matrix>, Dynamic, 1> alpha1(3, 1), alpha2(3, 1), alpha3(3, 1);\n alpha1 << 1.0, 1.0, 1.0;\n alpha2 << 6.2, 3.5, 9.1;\n alpha3 << 2.5, 7.4, 6.1;\n\n std::vector>, Dynamic, 1>> theta_vec(3);\n theta_vec[0] = theta1;\n theta_vec[1] = theta2;\n theta_vec[2] = theta3;\n\n std::vector>, Dynamic, 1>> alpha_vec(3);\n alpha_vec[0] = alpha1;\n alpha_vec[1] = alpha2;\n alpha_vec[2] = alpha3;\n\n Matrix>, Dynamic, 1> result(3);\n result[0] = dirichlet_log(theta1, alpha1);\n result[1] = dirichlet_log(theta2, alpha2);\n result[2] = dirichlet_log(theta3, alpha3);\n\n fvar> out = dirichlet_log(theta_vec, alpha_vec);\n\n EXPECT_FLOAT_EQ(result.val().val().val().sum(), out.val_.val_.val());\n EXPECT_FLOAT_EQ(result.d().val().val().sum(), out.d_.val_.val());\n\n result[0] = dirichlet_log(theta1, alpha1);\n result[1] = dirichlet_log(theta2, alpha1);\n result[2] = dirichlet_log(theta3, alpha1);\n\n out = dirichlet_log(theta_vec, alpha1);\n\n EXPECT_FLOAT_EQ(result.val().val().val().sum(), out.val_.val_.val());\n EXPECT_FLOAT_EQ(result.d().val().val().sum(), out.d_.val_.val());\n\n result[0] = dirichlet_log(theta1, alpha1);\n result[1] = dirichlet_log(theta1, alpha2);\n result[2] = dirichlet_log(theta1, alpha3);\n\n out = dirichlet_log(theta1, alpha_vec);\n\n EXPECT_FLOAT_EQ(result.val().val().val().sum(), out.val_.val_.val());\n EXPECT_FLOAT_EQ(result.d().val().val().sum(), out.d_.val_.val());\n}\n", "meta": {"hexsha": "f3b6092b9f1d9d0f9fad297e91e646525b03d8aa", "size": 5747, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/unit/math/mix/prob/dirichlet_test.cpp", "max_stars_repo_name": "bayesmix-dev/math", "max_stars_repo_head_hexsha": "3616f7195adc95ef8e719a2af845d61102bc9272", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-06-14T14:33:37.000Z", "max_stars_repo_stars_event_max_datetime": 
"2020-06-14T14:33:37.000Z", "max_issues_repo_path": "test/unit/math/mix/prob/dirichlet_test.cpp", "max_issues_repo_name": "bayesmix-dev/math", "max_issues_repo_head_hexsha": "3616f7195adc95ef8e719a2af845d61102bc9272", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/unit/math/mix/prob/dirichlet_test.cpp", "max_forks_repo_name": "bayesmix-dev/math", "max_forks_repo_head_hexsha": "3616f7195adc95ef8e719a2af845d61102bc9272", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-05-10T12:55:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-10T12:55:07.000Z", "avg_line_length": 30.7326203209, "max_line_length": 80, "alphanum_fraction": 0.6276318079, "num_tokens": 2188, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7826624789529375, "lm_q2_score": 0.6370308082623217, "lm_q1q2_score": 0.49858011156398213}} {"text": "//==================================================================================================\n/*\n Copyright 2016 NumScale SAS\n\n Distributed under the Boost Software License, Version 1.0.\n (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)\n*/\n//==================================================================================================\n//! [reduce-phase]\n#include \n#include \n#include \n\nint main()\n{\n float values[] = {1.f,2.f,3.f,4.f,5.f,6.f,7.f,8.f,9.f,};\n\n std::cout << \"SIMD reduce :\"\n << boost::simd::reduce( &values[0], &values[0]+9, 1.f\n , boost::simd::multiplies\n )\n << std::endl;\n\n return 0;\n}\n//! 
[reduce-phase]\n", "meta": {"hexsha": "10713798aac70e60e7bb54804ab60e673ee8bdfd", "size": 812, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/doc/range/reduce.phase.cpp", "max_stars_repo_name": "TobiasLudwig/boost.simd", "max_stars_repo_head_hexsha": "c04d0cc56747188ddb9a128ccb5715dd3608dbc1", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2018-02-25T22:23:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T15:13:12.000Z", "max_issues_repo_path": "test/doc/range/reduce.phase.cpp", "max_issues_repo_name": "remymuller/boost.simd", "max_issues_repo_head_hexsha": "3caefb7ee707e5f68dae94f8f31f72f34b7bb5de", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/doc/range/reduce.phase.cpp", "max_forks_repo_name": "remymuller/boost.simd", "max_forks_repo_head_hexsha": "3caefb7ee707e5f68dae94f8f31f72f34b7bb5de", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-12-12T12:36:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-10T14:27:07.000Z", "avg_line_length": 30.0740740741, "max_line_length": 100, "alphanum_fraction": 0.4150246305, "num_tokens": 178, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7826624890918021, "lm_q2_score": 0.6370307944803832, "lm_q1q2_score": 0.498580107236145}} {"text": "#define BOOST_TEST_MODULE \"Test General Distance Function\"\n\n#include \n#include \n#include \n\n#include \"distance/Euclidean.hpp\"\n#include \"distance/Chebyshev.hpp\"\n#include \"distance/Manhattan.hpp\"\n#include \"distance/Cosine.hpp\"\n#include \"distance/Distance.hpp\"\n#include \"Exception.hpp\"\n#include \"TimeSeries.hpp\"\n\nusing namespace genex;\nusing std::isinf;\n\n#define TOLERANCE 1e-9\nstruct MockData\n{\n dist_t euclidean_dist = pairwiseDistance;\n dist_t manhattan_dist = pairwiseDistance;\n dist_t chebyshev_dist = pairwiseDistance;\n\n dist_t euclidean_warped_dist = warpedDistance;\n dist_t manhattan_warped_dist = warpedDistance;\n dist_t chebyshev_warped_dist = warpedDistance;\n\n data_t dat_1[5] = {1, 2, 3, 4, 5};\n data_t dat_2[5] = {11, 2, 3, 4, 5};\n\n data_t dat_3[2] = {2, 4};\n data_t dat_4[5] = {2, 2, 2, 4, 4};\n\n data_t dat_5[4] = {1, 2, 2, 4};\n data_t dat_6[4] = {1, 2, 4, 5};\n\n data_t dat_7[4] = {2, 2, 2, 2};\n data_t dat_8[4] = {20, 20, 20, 15};\n\n data_t dat_9[6] = {2, 2, 2, 2, 2, 2};\n data_t dat_10[6] = {4, 3, 3, 3, 3, 3};\n\n data_t dat_11[7] = {4, 3, 5, 3, 5, 3, 4};\n data_t dat_12[7] = {4, 3, 3, 1, 1, 3, 4};\n\n data_t dat_13[10] = {0, 2, 3, 5, 8, 6, 3, 2, 3, 5};\n data_t dat_14[7] = {8, 4, 6, 1, 5, 10, 9};\n};\n\nBOOST_AUTO_TEST_CASE( general_distance, *boost::unit_test::tolerance(TOLERANCE) )\n{\n MockData data;\n TimeSeries ts_1(data.dat_1, 0, 0, 5);\n TimeSeries ts_2(data.dat_2, 0, 0, 5);\n\n data_t total_1 = data.euclidean_dist(ts_1, ts_2, INF, gNoMatching);\n BOOST_TEST( total_1, 2.0 );\n\n data_t total_2 = data.manhattan_dist(ts_1, ts_2, INF, gNoMatching);\n BOOST_TEST( total_2, 2.0 );\n\n data_t total_3 = data.chebyshev_dist(ts_1, ts_2, INF, gNoMatching);\n BOOST_TEST( total_3, 10.0 );\n}\n\nBOOST_AUTO_TEST_CASE( easy_general_warped_distance, *boost::unit_test::tolerance(TOLERANCE) )\n{\n 
MockData data;\n TimeSeries ts_1{data.dat_1, 0, 0, 2};\n TimeSeries ts_2{data.dat_2, 0, 0, 2};\n\n TimeSeries ts_3{data.dat_3, 0, 0, 2};\n TimeSeries ts_4{data.dat_4, 0, 0, 5};\n\n TimeSeries ts_5{data.dat_5, 0, 0, 4};\n TimeSeries ts_6{data.dat_6, 0, 0, 4};\n\n TimeSeries ts_11{data.dat_11, 0, 0, 7};\n TimeSeries ts_12{data.dat_12, 0, 0, 7};\n\n setWarpingBandRatio(1.0);\n\n data_t total_0 = data.euclidean_warped_dist(ts_1, ts_2, INF, gNoMatching);\n BOOST_TEST( total_0 == sqrt(100.0) / (2 * 2) );\n\n data_t total_1 = data.euclidean_warped_dist(ts_3, ts_4, INF, gNoMatching);\n BOOST_TEST( total_1 == 0.0 );\n\n data_t total_2 = data.manhattan_warped_dist(ts_3, ts_4, INF, gNoMatching);\n BOOST_TEST( total_2 == 0.0 );\n\n data_t total_3 = data.chebyshev_warped_dist(ts_3, ts_4, INF, gNoMatching);\n BOOST_TEST( total_3 == 0.0 );\n\n data_t total_4 = data.euclidean_warped_dist(ts_5, ts_6, INF, gNoMatching);\n BOOST_TEST( total_4 == sqrt(1.0) / (2 * 4.0) );\n\n data_t total_5 = data.manhattan_warped_dist(ts_5, ts_6, INF, gNoMatching);\n BOOST_TEST( total_5 == 1.0/ (2 * 4.0) );\n\n data_t total_6 = data.chebyshev_warped_dist(ts_5, ts_6, INF, gNoMatching);\n BOOST_TEST( total_6 == 1.0 );\n\n data_t total_7 = data.euclidean_warped_dist(ts_11, ts_12, INF, gNoMatching);\n data_t result_7 = sqrt(12.0)/ (2 * 7);\n BOOST_TEST( total_7 == result_7 );\n\n data_t total_8 = data.manhattan_warped_dist(ts_11, ts_12, INF, gNoMatching);\n BOOST_TEST( total_8 == 8.0 / (2 * 7) );\n\n data_t total_9 = data.chebyshev_warped_dist(ts_11, ts_12, INF, gNoMatching);\n BOOST_TEST( total_9 == (2.0) );\n\n matching_t matching_1 = {};\n matching_t matching_1_test = {{0, 0}, {1, 1}};\n data_t total_10 = data.euclidean_warped_dist(ts_1, ts_2, INF, matching_1);\n BOOST_TEST( total_10 == sqrt(100.0) / (2 * 2) );\n BOOST_TEST( matching_1 == matching_1_test);\n\n matching_t matching_2 = {};\n matching_t matching_2_test = {{0, 0}, {0, 1}, {0, 2}, {1, 3}, {1, 4}};\n data_t total_11 = 
data.euclidean_warped_dist(ts_3, ts_4, INF, matching_2);\n BOOST_TEST( total_11 == 0 );\n BOOST_TEST( matching_2 == matching_2_test);\n\n matching_t matching_3 = {};\n matching_t matching_3_test = {{0, 0}, {0, 1}, {0, 2}, {1, 3}, {1, 4}};\n data_t total_12 = data.manhattan_warped_dist(ts_3, ts_4, INF, matching_3);\n BOOST_TEST( total_12 == 0 );\n BOOST_TEST( matching_3 == matching_3_test);\n\n matching_t matching_4 = {};\n matching_t matching_4_test = {{0, 0}, {0, 1}, {0, 2}, {1, 3}, {1, 4}};\n data_t total_13 = data.chebyshev_warped_dist(ts_3, ts_4, INF, matching_4);\n BOOST_TEST( total_13 == 0 );\n BOOST_TEST( matching_4 == matching_4_test);\n\n matching_t matching_5 = {};\n matching_t matching_5_test = {{0, 0}, {1, 1}, {2, 1}, {3, 2}, {3, 3}};\n data_t total_14 = data.euclidean_warped_dist(ts_5, ts_6, INF, matching_5);\n BOOST_TEST( total_14 == sqrt(1.0) / (2 * 4.0) );\n BOOST_TEST( matching_5 == matching_5_test);\n\n matching_t matching_6 = {};\n matching_t matching_6_test = {{0, 0}, {1, 1}, {2, 1}, {3, 2}, {3, 3}};\n data_t total_15 = data.manhattan_warped_dist(ts_5, ts_6, INF, matching_6);\n BOOST_TEST( total_15 == sqrt(1.0) / (2 * 4.0) );\n BOOST_TEST( matching_6 == matching_6_test);\n \n matching_t matching_7 = {};\n matching_t matching_7_test = {{0, 0}, {1, 1}, {2, 1}, {3, 2}, {3, 3}};\n data_t total_16 = data.chebyshev_warped_dist(ts_5, ts_6, INF, matching_7);\n BOOST_TEST( total_16 == 1.0 );\n BOOST_TEST( matching_7 == matching_7_test);\n}\n\nBOOST_AUTO_TEST_CASE( easy_gwd_dropout, *boost::unit_test::tolerance(TOLERANCE) )\n{\n MockData data;\n TimeSeries ts_3{data.dat_3, 0, 0, 2};\n TimeSeries ts_4{data.dat_4, 0, 0, 5};\n\n TimeSeries ts_7{data.dat_7, 0, 0, 4};\n TimeSeries ts_8{data.dat_8, 0, 0, 4};\n\n data_t total_1 = data.euclidean_warped_dist(ts_3, ts_4, 5, gNoMatching);\n BOOST_TEST( total_1 == 0.0 );\n\n data_t total_2 = data.manhattan_warped_dist(ts_3, ts_4, 5, gNoMatching);\n BOOST_TEST( total_2 == 0.0 );\n\n data_t total_3 = 
data.chebyshev_warped_dist(ts_3, ts_4, 5, gNoMatching);\n BOOST_TEST( total_3 == 0.0 );\n\n data_t total_5 = data.manhattan_warped_dist(ts_7, ts_8, 5, gNoMatching);\n BOOST_TEST( isinf(total_5) );\n\n data_t total_6 = data.chebyshev_warped_dist(ts_7, ts_8, 5, gNoMatching);\n BOOST_TEST( isinf(total_6) );\n}\n\nBOOST_AUTO_TEST_CASE( gwd_different_distances, *boost::unit_test::tolerance(TOLERANCE) )\n{\n MockData data;\n TimeSeries ts_9{data.dat_9, 0, 0, 6};\n TimeSeries ts_10{data.dat_10, 0, 0, 6};\n\n data_t total_1 = data.euclidean_warped_dist(ts_9, ts_10, INF, gNoMatching);\n BOOST_TEST( total_1 == sqrt(9.0)/(2 * 6) );\n\n data_t total_2 = data.manhattan_warped_dist(ts_9, ts_10, INF, gNoMatching);\n BOOST_TEST( total_2 == 7.0/ (2 * 6) );\n\n data_t total_3 = data.chebyshev_warped_dist(ts_9, ts_10, INF, gNoMatching);\n BOOST_TEST( total_3 == 2.0 );\n}\n\nBOOST_AUTO_TEST_CASE( get_distance_metric, *boost::unit_test::tolerance(TOLERANCE) )\n{\n MockData data;\n TimeSeries ts_for_function_call{data.dat_3, 0, 0, 2};\n const dist_t d = getDistanceFromName(\"euclidean\");\n BOOST_CHECK(d);\n}\n\nBOOST_AUTO_TEST_CASE( distance_not_found )\n{\n BOOST_CHECK_THROW( getDistanceFromName(\"oracle\"), GenexException );\n}\n\nBOOST_AUTO_TEST_CASE( keogh_lower_bound, *boost::unit_test::tolerance(TOLERANCE) )\n{\n MockData data;\n TimeSeries a{data.dat_13, 10};\n TimeSeries b{data.dat_14, 7};\n\n setWarpingBandRatio(0.2);\n data_t klb = keoghLowerBound(a, b, 10);\n\n BOOST_TEST( klb == sqrt(31.0) / (2 * 10) );\n}\n", "meta": {"hexsha": "de886ad90ecdb53dc8675b231bcd35df88a90966", "size": 7400, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/distance/DistanceTest.cpp", "max_stars_repo_name": "mihinsumaria/genex", "max_stars_repo_head_hexsha": "34786b0cf5d573348b82e5d164dbc05e0411d6a8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-06-28T07:36:42.000Z", "max_stars_repo_stars_event_max_datetime": 
"2021-01-11T07:49:24.000Z", "max_issues_repo_path": "test/distance/DistanceTest.cpp", "max_issues_repo_name": "mihinsumaria/genex", "max_issues_repo_head_hexsha": "34786b0cf5d573348b82e5d164dbc05e0411d6a8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/distance/DistanceTest.cpp", "max_forks_repo_name": "mihinsumaria/genex", "max_forks_repo_head_hexsha": "34786b0cf5d573348b82e5d164dbc05e0411d6a8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-12-01T20:25:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-01T20:25:42.000Z", "avg_line_length": 33.1838565022, "max_line_length": 93, "alphanum_fraction": 0.6852702703, "num_tokens": 2885, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7826624789529375, "lm_q2_score": 0.6370308013713525, "lm_q1q2_score": 0.49858010617067905}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \nusing namespace boost::multiprecision;\nusing namespace std;\nvoid solve(cpp_int x, vector &v) {\n if (x > 3234566667) return;\n v.push_back(x);\n cpp_int base = x * 10, m = x % 10;\n for (int i = -1; i <= 1; i++) {\n if (m + i >= 0 && m + i <= 9) solve(base + (m + i), v);\n }\n}\nint main() {\n int k; cin >> k;\n vector v;\n for (int i = 1; i <= 9; i++) solve(i, v);\n sort(v.begin(), v.end());\n cout << v[k - 1] << endl;\n}\n", "meta": {"hexsha": "354a4ea955cbc0ec09da053b491b72a385d4eaeb", "size": 622, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "AtCoder/abc161/d/main.cpp", "max_stars_repo_name": "H-Tatsuhiro/Com_Pro-Cpp", "max_stars_repo_head_hexsha": "fd79f7821a76b11f4a6f83bbb26a034db577a877", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, 
"max_issues_repo_path": "AtCoder/abc161/d/main.cpp", "max_issues_repo_name": "H-Tatsuhiro/Com_Pro-Cpp", "max_issues_repo_head_hexsha": "fd79f7821a76b11f4a6f83bbb26a034db577a877", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-10-19T08:47:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T05:23:56.000Z", "max_forks_repo_path": "AtCoder/abc161/d/main.cpp", "max_forks_repo_name": "H-Tatsuhiro/Com_Pro-Cpp", "max_forks_repo_head_hexsha": "fd79f7821a76b11f4a6f83bbb26a034db577a877", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9166666667, "max_line_length": 63, "alphanum_fraction": 0.5643086817, "num_tokens": 206, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7826624890918021, "lm_q2_score": 0.6370307806984444, "lm_q1q2_score": 0.4985800964495385}} {"text": "//------------------------------------------------------------------------------\n/*\n This file is part of rippled: https://github.com/ripple/rippled\n Copyright (c) 2012-2015 Ripple Labs Inc.\n\n Permission to use, copy, modify, and/or distribute this software for any\n purpose with or without fee is hereby granted, provided that the above\n copyright notice and this permission notice appear in all copies.\n\n THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n*/\n//==============================================================================\n\n#include \n#include \n#include \n#include \n#include \n\nnamespace ripple\n{\n\nstd::pair\nmulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)\n{\n using namespace boost::multiprecision;\n\n uint128_t result;\n result = multiply(result, value, mul);\n\n result /= div;\n\n auto constexpr limit = std::numeric_limits::max();\n\n if (result > limit)\n return { false, limit };\n\n return { true, static_cast(result) };\n}\n\n} // ripple\n", "meta": {"hexsha": "232817aaeab1b1f8e6db77756c2c2bae6c357f0f", "size": 1645, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/ripple/basics/impl/mulDiv.cpp", "max_stars_repo_name": "tlongwell-ripple/rippled", "max_stars_repo_head_hexsha": "4e3dc0e820aa5bcf187ebe1b5d4471932845cc89", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-05-02T01:19:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-02T01:19:23.000Z", "max_issues_repo_path": "src/ripple/basics/impl/mulDiv.cpp", "max_issues_repo_name": "tlongwell-ripple/rippled", "max_issues_repo_head_hexsha": "4e3dc0e820aa5bcf187ebe1b5d4471932845cc89", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 10.0, "max_issues_repo_issues_event_min_datetime": "2020-02-03T14:00:08.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-23T17:29:21.000Z", "max_forks_repo_path": "src/ripple/basics/impl/mulDiv.cpp", "max_forks_repo_name": "tlongwell-ripple/rippled", "max_forks_repo_head_hexsha": "4e3dc0e820aa5bcf187ebe1b5d4471932845cc89", "max_forks_repo_licenses": ["BSL-1.0"], 
"max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2020-02-03T14:22:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-28T20:57:19.000Z", "avg_line_length": 34.2708333333, "max_line_length": 80, "alphanum_fraction": 0.6443768997, "num_tokens": 367, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7826624789529375, "lm_q2_score": 0.6370307806984444, "lm_q1q2_score": 0.49858008999076964}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\n#include \n#include \n#include \n\ntypedef CGAL::Simple_cartesian Kernel;\ntypedef Kernel::Point_3 Point;\ntypedef Kernel::Vector_3 Vector;\ntypedef CGAL::Polyhedron_3 Polyhedron;\ntypedef boost::graph_traits::vertex_descriptor vertex_descriptor;\n\ntypedef CGAL::Mean_curvature_flow_skeletonization Mean_curvature_skeleton;\ntypedef Mean_curvature_skeleton::Skeleton Skeleton;\ntypedef boost::graph_traits::vertex_descriptor vertex_desc;\ntypedef boost::graph_traits::edge_descriptor edge_desc;\n\n// The input of the skeletonization algorithm must be a pure triangular closed\n// mesh and has only one component.\nbool is_mesh_valid(Polyhedron& pMesh)\n{\n if (!pMesh.is_closed())\n {\n std::cerr << \"The mesh is not closed.\";\n return false;\n }\n if (!pMesh.is_pure_triangle())\n {\n std::cerr << \"The mesh is not a pure triangle mesh.\";\n return false;\n }\n\n // the algorithm is only applicable on a mesh\n // that has only one connected component\n std::size_t num_component;\n CGAL::Counting_output_iterator output_it(&num_component);\n CGAL::internal::corefinement::extract_connected_components(pMesh, output_it);\n ++output_it;\n if (num_component != 1)\n {\n std::cerr << \"The mesh is not a single closed mesh. 
It has \"\n << num_component << \" components.\";\n return false;\n }\n return true;\n}\n\nint main()\n{\n Polyhedron mesh;\n std::ifstream input(\"data/elephant.off\");\n\n if ( !input || !(input >> mesh) || mesh.empty() ) {\n std::cerr << \"Cannot open data/elephant.off\" << std::endl;\n return EXIT_FAILURE;\n }\n if (!is_mesh_valid(mesh)) {\n return EXIT_FAILURE;\n }\n\n Skeleton skeleton;\n\n CGAL::extract_mean_curvature_flow_skeleton(mesh, skeleton);\n\n if (num_vertices(skeleton) == 0)\n {\n std::cerr << \"The number of skeletal points is zero!\\n\";\n return EXIT_FAILURE;\n }\n\n// check all vertices are seen exactly once\n{\n std::set visited;\n BOOST_FOREACH(vertex_desc v, vertices(skeleton))\n {\n BOOST_FOREACH(vertex_descriptor vd, skeleton[v].vertices)\n if (!visited.insert(vd).second)\n {\n std::cerr << \"A vertex was seen twice!\\n\";\n return EXIT_FAILURE;\n }\n }\n\n BOOST_FOREACH(vertex_descriptor vd, vertices(mesh))\n {\n if (!visited.count(vd))\n {\n std::cerr << \"A vertex was not seen!\\n\";\n return EXIT_FAILURE;\n }\n }\n}\n\n// check the skeleton is connected\n{\n std::queue qu;\n std::set visited;\n\n qu.push(*vertices(skeleton).first);\n visited.insert(qu.back());\n\n while (!qu.empty())\n {\n vertex_desc cur = qu.front();\n qu.pop();\n\n BOOST_FOREACH(edge_desc ed, in_edges(cur, skeleton))\n {\n vertex_desc next = source(ed, skeleton);\n if (visited.insert(next).second)\n qu.push(next);\n }\n }\n\n BOOST_FOREACH(vertex_desc vd, vertices(skeleton))\n {\n if (!visited.count(vd))\n {\n std::cerr << \"Skeleton curve is not fully connected!\\n\";\n return EXIT_FAILURE;\n }\n }\n}\n std::cout << \"Pass connectivity test.\\n\";\n return EXIT_SUCCESS;\n}\n\n", "meta": {"hexsha": "c6984fac8a5c1346102b93c51a82799bb26a38f7", "size": 3679, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "graphics/cgal/Surface_mesh_skeletonization/test/Surface_mesh_skeletonization/skeleton_connectivity_test.cpp", "max_stars_repo_name": "hlzz/dotfiles", 
"max_stars_repo_head_hexsha": "0591f71230c919c827ba569099eb3b75897e163e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2016-03-30T14:31:52.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-02T05:01:32.000Z", "max_issues_repo_path": "graphics/cgal/Surface_mesh_skeletonization/test/Surface_mesh_skeletonization/skeleton_connectivity_test.cpp", "max_issues_repo_name": "hlzz/dotfiles", "max_issues_repo_head_hexsha": "0591f71230c919c827ba569099eb3b75897e163e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphics/cgal/Surface_mesh_skeletonization/test/Surface_mesh_skeletonization/skeleton_connectivity_test.cpp", "max_forks_repo_name": "hlzz/dotfiles", "max_forks_repo_head_hexsha": "0591f71230c919c827ba569099eb3b75897e163e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0514705882, "max_line_length": 93, "alphanum_fraction": 0.6605055722, "num_tokens": 897, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7826624688140728, "lm_q2_score": 0.6370307875894138, "lm_q1q2_score": 0.49858008892530375}} {"text": "/******************************************************\n Author : shipeng_liu \n Email : 1196075299@qq.com\n Description: Adaptive dynamic programming \n******************************************************/\n#include \n#include\n#include \n#include \n#include \n#include \"rambot_controller/src/controller/adp_learning/critic_network.cpp\"\nusing namespace std;\n\n/****************************************************************** \n Action Network\n Input: S, diff_s\n number of hidden_layer: 1\n number of active_function: hidden_number\n output: steer_cmd\n parameter_layer1: parameters from input to hidden_layer\n parameter_layer2: parameters from hidden_layer to output\n******************************************************************/\n\nclass action_network {\n\n private:\n\n double learning_rate = 0.00001 ; //learning rate\n double reward_count = 1;\n Eigen::MatrixXd parameter_layer1;\n Eigen::MatrixXd parameter_layer2;\n Eigen::MatrixXd input_variable;\n Eigen::MatrixXd hidden_layer;\n Eigen::MatrixXd p_hidden_layer;\n double output;\n int hidden_number;\n double deriva_J_u;\n static double active_function(double input) {\n // 1 - exp( - qi(t) ) / 1 + exp( - qi(t) ) )\n double ret = std::exp(- input);\n double tmp = (2)/(1 + ret) - 1;\n return tmp;\n }\n\n\n public:\n\n action_network() \n {\n hidden_number = 4;\n init();\n }\n\n void init()\n {\n parameter_layer1 = Eigen::MatrixXd::Random(2, hidden_number);\n parameter_layer2 = Eigen::MatrixXd::Random(hidden_number, 1);\n \n hidden_layer = Eigen::MatrixXd::Random(1, hidden_number);\n p_hidden_layer = Eigen::MatrixXd::Random(1, hidden_number);\n input_variable = Eigen::MatrixXd::Random(1,2);\n output = 0;\n\n }\n\n double output_cmd(double s, double diff_s)\n {\n input_variable(0,0) = s;\n input_variable(0,1) = diff_s;\n //cout << parameter_layer1 << endl;\n //cout << parameter_layer2 
<< endl;\n hidden_layer = input_variable * parameter_layer1;\n \n for (int i = 0; i < hidden_number; i++)\n {\n p_hidden_layer(0,i) = active_function(hidden_layer(0,i));\n // limit the p_Hidden_layer\n if (p_hidden_layer(0,i) < 0.000001 && p_hidden_layer(0,i) > 0 )\n p_hidden_layer(0,i) = 0.000001;\n else if (p_hidden_layer(0,i) > -0.000001 && p_hidden_layer(0,i) < 0)\n p_hidden_layer(0,i) = - 0.000001;\n }\n Eigen::MatrixXd test = p_hidden_layer * parameter_layer2;\n output = test(0,0);\n output = active_function(output);\n return output;\n }\n\n void update_weight(double J_cost, critic_network *critic_network)\n {\n \n \n double reference_signal = 0;\n double eat = J_cost - reference_signal;\n cout << \"\\naction_network_information:\\n\";\n cout << left << setw(20) << \"[ refer: \" << right\n << setw(20) << reference_signal << \"]\" << endl;\n cout << left << setw(20) << \"[ J_cost: \" << right\n << setw(20) << J_cost << \"]\" << endl;\n cout << left << setw(20) << \"[ eat: \" << right\n << setw(20) << eat << \"]\" << endl;\n cout << left << setw(20) << \"[ p_hidden_layer: \" << right\n << setw(20) << p_hidden_layer << \"]\" << endl;\n cout << \"[ p_hidden_layer: \" << endl;\n cout << p_hidden_layer << endl;\n \n // cout << \"[ parameter_layer1: \" << endl;\n // cout << parameter_layer1 << endl;\n \n // cout << \"[ parameter_layer2: \" << endl;\n // cout << parameter_layer2 << endl;\n \n \n // compute the derivative of J to U\n for (int l = 0; l < 4; l++)\n {\n deriva_J_u += ( critic_network->return_parameter_layer2()(l,0) ) * 0.5 * (1 - pow(critic_network->return_p_hidden_layer()(0,l),2)) * critic_network->return_parameter_layer1()(2,l);\n }\n for (int j = 0; j < hidden_number; j++)\n {\n //compute the parameter_layer1\n for (int k = 0; k < 2; k++)\n {\n \n double derivative_1 = reward_count * eat * 0.5 * (1-pow(output,2)) * parameter_layer2(j,0) * 0.5 * (1 - pow(p_hidden_layer(0,j),2)) * input_variable(0,k) * deriva_J_u;\n double delta_weight1 = learning_rate 
* (- derivative_1);\n \n parameter_layer1(k,j) += delta_weight1; \n }\n\n // compute the parameter_layer2 \n \n double derivative = reward_count * eat * 0.5 * (1-pow(output,2)) *(p_hidden_layer(0,j)) * deriva_J_u;\n \n double delta_weight = learning_rate * (- derivative);\n parameter_layer2(j,0) += delta_weight;\n\n }\n \n }\n};\n \n ", "meta": {"hexsha": "76ee7a787c1872d858cb59399894f23579be8370", "size": 4889, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/controller/src/controller/adp_learning/action_network.cpp", "max_stars_repo_name": "TJ-Work/CVSC", "max_stars_repo_head_hexsha": "6850bcffe765a6586dc5a81900206398be6dc1f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/controller/src/controller/adp_learning/action_network.cpp", "max_issues_repo_name": "TJ-Work/CVSC", "max_issues_repo_head_hexsha": "6850bcffe765a6586dc5a81900206398be6dc1f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/controller/src/controller/adp_learning/action_network.cpp", "max_forks_repo_name": "TJ-Work/CVSC", "max_forks_repo_head_hexsha": "6850bcffe765a6586dc5a81900206398be6dc1f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1888111888, "max_line_length": 192, "alphanum_fraction": 0.5313970137, "num_tokens": 1187, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8807970748488297, "lm_q2_score": 0.5660185351961015, "lm_q1q2_score": 0.4985474701109455}} {"text": "#pragma once\n\n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace std;\nusing namespace Eigen;\n\ntemplate \nstruct ParticleFilter{\n ParticleFilter(int number_of_particles, int states, function process, function cost,\n VectorXd &start, double std):\n number_of_particles(number_of_particles), process(process), cost(cost), start(start){\n particles.resize(number_of_particles, VectorXd(states));\n weights.resize(number_of_particles, 1.0/number_of_particles);\n costs.resize(number_of_particles,RAND_MAX);\n cumsum.resize(number_of_particles);\n indx.resize(number_of_particles);\n\n distribution = normal_distribution(0,std);\n state_length = particles.begin()->size();\n\n resample(true);\n }\n\n void step(){\n double sum_of_elems = 0.0;\n for(uint i=0;i(costs, val, index);\n winner = particles[*index];\n return val;\n }\n\n function cost;\n function process;\n\n void resample(bool all, int N = 0){\n if(all){ //resample all\n for(state &particle:particles){\n particle = start;\n for(int i=0;i indexes = sort_indexes(weights);\n for(uint i=0;i\n void minimum(vector &v, T &val, int *index ){\n *index = 0;\n val = v[0];\n for(uint i=1; i\n vector sort_indexes(const vector &v) {\n\n // initialize original index locations\n vector idx(v.size());\n iota(idx.begin(), idx.end(), 0);\n\n // sort indexes based on comparing values in v\n sort(idx.begin(), idx.end(),\n [&v](size_t i1, size_t i2) {return v[i1] > v[i2];});\n\n return idx;\n }\n\n vector particles;\n vector weights, costs;\n vector cumsum;\n vector indx;\n int number_of_particles;\n default_random_engine generator;\n normal_distribution distribution;\n int state_length;\n VectorXd start;\n};", "meta": {"hexsha": "e3fb3670439c17da1cb6778ac91f2ca5392fb855", "size": 3358, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/darkroom/ParticleFilter.hpp", 
"max_stars_repo_name": "Roboy/DarkRoom_rviz", "max_stars_repo_head_hexsha": "8f049218bc600d4b179303493a70bfe2389df73c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2017-07-06T15:34:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-04T00:22:54.000Z", "max_issues_repo_path": "include/darkroom/ParticleFilter.hpp", "max_issues_repo_name": "Roboy/DarkRoom_rviz", "max_issues_repo_head_hexsha": "8f049218bc600d4b179303493a70bfe2389df73c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/darkroom/ParticleFilter.hpp", "max_forks_repo_name": "Roboy/DarkRoom_rviz", "max_forks_repo_head_hexsha": "8f049218bc600d4b179303493a70bfe2389df73c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7008547009, "max_line_length": 120, "alphanum_fraction": 0.5652173913, "num_tokens": 767, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.810478926981208, "lm_q2_score": 0.6150878555160665, "lm_q1q2_score": 0.4985157451378339}} {"text": "/*\n * Path-related utility functions.\n *\n * Copyright (C) 2014-2015 DubinsPathPlanner.\n * Created by David Goodman \n * Redistribution and use of this file is allowed according to the terms of the MIT license.\n * For details see the LICENSE file distributed with DubinsPathPlanner.\n */\n#include \n#include \n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n\nusing Eigen::Vector3d;\nusing Eigen::Vector2d;\n\n\n#define HEADING_TOLERANCE 1E-10 // upperbound for Euclidean metric, radius safe?\n\nnamespace dpp {\n\n/**\n * Calculate the shortest Dubins' path distance to the node. 
Note that all angles\n * used in this function are heading angles from 0 at the y-axis, and\n * counter-clockwise is positive. \n * @FIXME Add RLR and LRL curves. Does this fix dist > 3*r?\n */\ndouble dubinsPathLength(VehicleConfiguration &Cs, VehicleConfiguration &Ce,\n double turnRadius) {\n double r = turnRadius; // shorter name\n Vector2d Ps = Vector2d(Cs.x(), Cs.y()),\n Pe = Vector2d(Ce.x(), Ce.y());\n double Xs = Cs.m_heading,\n Xe = Ce.m_heading;\n double dist = (Ps - Pe).norm();\n\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"Given Cs=\" << Cs << \", Ce=\" << Ce << \", r=\" << r << std::endl;\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"Got dist=\" << dist << \" compared to r=\" << r << \".\" << std::endl;\n\n\n // Added tolerance to avoid numerical instability for paths with no curviture\n // TODO compute curviture as ratio w.r.t. turn radius?\n // FIXME this does not take into account the feasability of the path! \n // Should add an isReachable( func)\n if (fabs(Xs - Xe) <= HEADING_TOLERANCE) {\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"Using straight line L=\" << (Pe - Ps).norm() << \". |Xs - Xe| = \"\n << fabs(Xs - Xe) << \" <= \" << HEADING_TOLERANCE << std::endl;\n return (Pe - Ps).norm();\n }\n\n //DPP_ASSERT(dist >= 3.0 * r);\n // FIXME: return an unfeasible path macro, eg -1\n // FIXME: improve this function so that we don't have to use the dubins-curves library\n // this is needed for boustrophedon algorithm which does not care if the\n // point dist is >= 3.0 * r\n // note: Detect if the final heading is close to the opposite of the initial,\n // to allow for implementing CCC-type paths (ie. U-turns) via Dubins\n // Corollarry to theorem 1. Though this may not satisfy all cases where \n // no feasible paths exist. 
\"U-turn\" is either: {RLR or LRL}?\n if (dist < 3.0 * r) {\n //return DPP_MAX_EDGE_COST;\n double *q0, *q1;\n Cs.asArray(&q0);\n Ce.asArray(&q1);\n q0[2] = dpp::headingToAngle(q0[2]);\n q1[2] = dpp::headingToAngle(q1[2]);\n DubinsCurves::DubinsPath path;\n DubinsCurves::dubins_init( q0, q1, turnRadius, &path);\n double expectedLength = DubinsCurves::dubins_path_length(&path);\n return expectedLength;\n }\n //if (dist < 3.0 * r) {\n // std::domain_error(\"distance must be larger than 3*r\");\n //}\n\n // Convert headings to circular angles\n double alpha = headingToAngle(Xs),\n beta = headingToAngle(Xe);\n\n // Find circle center points for each case\n Vector3d R_rs(cos(alpha - M_PI/2.0), sin(alpha - M_PI/2.0), 0.0),\n R_ls(cos(alpha + M_PI/2.0), sin(alpha + M_PI/2.0), 0.0),\n R_re(cos(beta - M_PI/2.0), sin(beta - M_PI/2.0), 0.0),\n R_le(cos(beta + M_PI/2.0), sin(beta + M_PI/2.0), 0.0);\n\n Vector3d PC_rs(Cs.x(), Cs.y(), 0.0),\n PC_ls(Cs.x(), Cs.y(), 0.0),\n PC_re(Ce.x(), Ce.y(), 0.0),\n PC_le(Ce.x(), Ce.y(), 0.0);\n\n PC_rs = PC_rs.transpose() + r*R_rs.transpose();\n PC_ls = PC_ls.transpose() + r*R_ls.transpose();\n PC_re = PC_re.transpose() + r*R_re.transpose();\n PC_le = PC_le.transpose() + r*R_le.transpose();\n\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"PC_rs: (\" << PC_rs.x() << \",\" << PC_rs.y()\n << \",\" << PC_rs.z() << \",\" <<\").\" << std::endl;\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"PC_ls: (\" << PC_ls.x() << \",\" << PC_ls.y()\n << \",\" << PC_ls.z() << \",\" <<\").\" << std::endl;\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"PC_re: (\" << PC_re.x() << \",\" << PC_re.y()\n << \",\" << PC_re.z() << \",\" <<\").\" << std::endl;\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"PC_le: (\" << PC_le.x() << \",\" << PC_le.y()\n << \",\" << PC_le.z() << \",\" <<\").\" << std::endl;\n\n // Case I, R-S-R\n double x = headingBetween(PC_rs, PC_re);\n double L1 = (PC_rs - PC_re).norm() \n + r*wrapAngle(2.0 * M_PI + wrapAngle(x - M_PI/2.0) - wrapAngle(Xs - 
M_PI/2.0))\n + r*wrapAngle(2.0 * M_PI + wrapAngle(Xe - M_PI/2.0) - wrapAngle(x - M_PI/2.0));\n\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"L1: \" << L1 << \", with x=\" << x << std::endl;\n\n // Case II, R-S-L\n double ls = (PC_le - PC_rs).norm();\n x = headingBetween(PC_rs, PC_le);\n double x2 = x - M_PI/2.0 + asin(2.0*r/ls);\n double L2 = sqrt(ls*ls - 4*r*r) + r*wrapAngle(2.0*M_PI + wrapAngle(x2)\n - wrapAngle(Xs - M_PI/2.0)) + r*wrapAngle(2.0*M_PI + wrapAngle(x2 + M_PI)\n - wrapAngle(Xe + M_PI/2.0));\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"L2: \" << L2 << \" with ls=\" << ls << \", x=\" << x << \", x2=\" << x2 << std::endl;\n\n // Case III, L-S-R\n ls = (PC_re - PC_ls).norm();\n x = headingBetween(PC_ls, PC_re);\n double ratioOA = 2.0*r/ls;\n // Bound the ratio from -1 to 1\n ratioOA = std::max (-1.0, ratioOA);\n ratioOA = std::min (1.0, ratioOA);\n DPP_ASSERT(ratioOA <= 1.0 && ratioOA >= -1.0);\n x2 = acos(ratioOA);\n double L3 = sqrt(ls*ls - 4*r*r) + r*wrapAngle(2.0*M_PI + wrapAngle(Xs + M_PI/2.0) \n - wrapAngle(x + x2)) + r*wrapAngle(2.0*M_PI + wrapAngle(Xe - M_PI/2.0)\n - wrapAngle(x + x2 - M_PI));\n\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"L3: \" << L3 << \" with ls=\" << ls << \", x=\" << x << \", x2=\"\n << x2 << std::endl;\n\n // Case IV, L-S-L\n x = headingBetween(PC_ls, PC_le);\n double L4 = (PC_ls - PC_le).norm() + r*wrapAngle(2.0*M_PI + wrapAngle(Xs + M_PI/2.0)\n - wrapAngle(x + M_PI/2.0)) + r*wrapAngle(2.0*M_PI + wrapAngle(x + M_PI/2.0)\n - wrapAngle(Xe + M_PI/2.0));\n\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"L4: \" << L4 << \", with x=\" << x << std::endl;\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"Comparing L1=\" << L1 << \" L2=\" << L2 << \" L3=\" << L3 << \" L4=\"\n << L4 << std::endl;\n\n return std::min({L1, L2, L3, L4});\n}\n\n/**\n * Finds the cost of the shortest dubins path over the given tour with headings.\n * If returnCost is true, the cost of returning back to the first node in the\n * tour will be included.\n * 
@param[in] G graph with nodes to tour\n * @param[in] GA attributes of the graph\n * @param[in] Tour ordered list of nodes to visit\n * @param[in] Headings for vehicle at each node\n * @param[in] turnRadius of the vehicle\n * @param[in] returnEdge whether to add a return edge back to the first node\n */\ndouble dubinsTourCost(ogdf::Graph &G, ogdf::GraphAttributes &GA,\n ogdf::List &Tour, ogdf::NodeArray &Headings,\n double turnRadius, bool returnCost) {\n ogdf::ListIterator iter;\n double cost = 0.0;\n\n if (Tour.size() < 2) {\n Logger::logWarn(DPP_LOGGER_VERBOSE_1) << \"Zero cost for an empty tour.\" << std::endl;\n return 0.0;\n }\n\n // Add the return edge if necessary\n ogdf::List modTour(Tour);\n ogdf::node lastNode = modTour.back();\n if (returnCost && lastNode != *(modTour.begin())) {\n modTour.pushBack(*(Tour.begin()));\n }\n else if (!returnCost && lastNode == *(modTour.begin())) {\n modTour.popBack();\n }\n\n int m = modTour.size() - 1;\n int i = 0; // edge index\n for ( iter = modTour.begin(); (i < m && iter != modTour.end()); iter++ ) {\n ogdf::node u = *iter, v = *(iter.succ());\n\n VehicleConfiguration Cu(GA.x(u), GA.y(u), Headings(u)),\n Cv(GA.x(v), GA.y(v), Headings(v));\n cost += dubinsPathLength(Cu, Cv, turnRadius);\n i++;\n \n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"Found cost \" << cost << \" from node \"\n << GA.idNode(u) << \"->\" << GA.idNode(v) << \", where headings \"\n << GA.idNode(u) << \": \" << Headings(u) << \", \" << GA.idNode(v) << \": \"\n << Headings(v) << std::endl;\n }\n Logger::logDebug(DPP_LOGGER_VERBOSE_2) << \"Found total cost \"\n << cost << \" for tour.\" << std::endl;\n\n return cost;\n}\n\n/**\n * Adds weighted edges to the graph with the cost of the shortest dubins path\n * between each node in the tour. If returnEdge is true, an edge returning to the\n * origin is added. 
Added edges are saved into the list of edges.\n * @param[in] G graph with nodes to tour\n * @param[in] GA attributes of the graph\n * @param[in] Tour ordered list of nodes to visit\n * @param[in] Headings for vehicle at each node\n * @param[in] turnRadius of the vehicle\n * @param[out] Edges ordered list of edges to build\n * @param[in] returnEdge whether to add a return edge back to the first node\n */\ndouble createDubinsTourEdges(ogdf::Graph &G, ogdf::GraphAttributes &GA,\n ogdf::List &Tour, ogdf::NodeArray &Headings,\n double turnRadius, ogdf::List &Edges, bool returnEdge) {\n ogdf::ListIterator iter;\n double total_cost = 0.0;\n\n if (Tour.size() < 2) return 0.0;\n DPP_ASSERT(G.numberOfEdges() < 1);\n // std::range_error(\"Cannot have existing edges in graph\");\n //}\n\n // Add the return edge if necessary\n ogdf::List modTour(Tour);\n ogdf::node lastNode = modTour.back();\n if (returnEdge && lastNode != *(modTour.begin())) {\n modTour.pushBack(*(Tour.begin()));\n }\n else if (!returnEdge && lastNode == *(modTour.begin())) {\n modTour.popBack();\n }\n \n int m = modTour.size() - 1;\n int i = 0; // edge index\n for ( iter = modTour.begin(); (i < m && iter != modTour.end()); iter++ ) {\n ogdf::node u = *iter, v = *(iter.succ());\n\n VehicleConfiguration Cu(GA.x(u), GA.y(u), Headings(u)),\n Cv(GA.x(v), GA.y(v), Headings(v));\n double cost = dubinsPathLength(Cu, Cv, turnRadius);\n\n Logger::logDebug(DPP_LOGGER_VERBOSE_3) << \"Found cost \" << cost << \" from node \"\n << GA.idNode(u) << \"->\" << GA.idNode(v) << \", where headings \"\n << GA.idNode(u) << \": \" << Headings(u) << \", \" << GA.idNode(v) << \": \"\n << Headings(v) << std::endl;\n //printf(\"Found cost %0.1f from node %d->%d, where headings %d: %0.1f, %d: %0.1f\\n\",\n // cost, GA.idNode(u), GA.idNode(v), GA.idNode(u), Headings(u), GA.idNode(v), Headings(v));\n\n // Add the edge\n ogdf::edge e = G.newEdge(u,v);\n GA.doubleWeight(e) = cost;\n Edges.pushBack(e);\n total_cost += cost;\n i++;\n }\n 
Logger::logDebug(DPP_LOGGER_VERBOSE_2) << \"Created tour edges with total cost \"\n << total_cost << \": \" << std::endl;\n Logger::logDebug(DPP_LOGGER_VERBOSE_2) << printEdges(G, GA, Edges);\n\n return total_cost;\n}\n\n\n\n/**\n * Computes an adjacency matrix of Dubins path lengths between nodes with the\n * given headings (for ATSP).\n */\nvoid buildDubinsAdjacencyMatrix(ogdf::Graph &G, ogdf::GraphAttributes &GA, \n NodeMatrix &A, ogdf::NodeArray &Headings, double turnRadius) {\n \n ogdf::node i, j;\n forall_nodes(i, G) {\n VehicleConfiguration Ci(GA.x(i), GA.y(i), Headings(i));\n\n forall_nodes(j, G) {\n if (i == j) {\n A[i][i] = DPP_MAX_EDGE_COST;\n continue;\n }\n VehicleConfiguration Cj(GA.x(j), GA.y(j), Headings(j));\n \n double w = dubinsPathLength(Ci, Cj, turnRadius);\n A[i][j] = w;\n }\n }\n}\n\n} // namespace DPP\n\n", "meta": {"hexsha": "259c50d91080b6fd6c21baf3099aa9e47f4350f8", "size": 11966, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/dpp/basic/Path.cpp", "max_stars_repo_name": "dagoodma/dubins_coverage", "max_stars_repo_head_hexsha": "f05333b55fb2cb073bb572562726f8b711df54ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11.0, "max_stars_repo_stars_event_min_datetime": "2017-09-28T00:56:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T16:02:56.000Z", "max_issues_repo_path": "src/dpp/basic/Path.cpp", "max_issues_repo_name": "dagoodma/dubins_coverage", "max_issues_repo_head_hexsha": "f05333b55fb2cb073bb572562726f8b711df54ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/dpp/basic/Path.cpp", "max_forks_repo_name": "dagoodma/dubins_coverage", "max_forks_repo_head_hexsha": "f05333b55fb2cb073bb572562726f8b711df54ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11.0, "max_forks_repo_forks_event_min_datetime": "2016-12-16T03:32:45.000Z", 
"max_forks_repo_forks_event_max_datetime": "2021-08-31T06:54:03.000Z", "avg_line_length": 39.4917491749, "max_line_length": 125, "alphanum_fraction": 0.5828179843, "num_tokens": 3670, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8104789178257653, "lm_q2_score": 0.6150878555160665, "lm_q1q2_score": 0.49851573950643224}} {"text": "///////////////////////////////////////////////////////////////////////////////\r\n// weighted_variance.hpp\r\n//\r\n// Copyright 2005 Daniel Egloff, Eric Niebler. Distributed under the Boost\r\n// Software License, Version 1.0. (See accompanying file\r\n// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\r\n\r\n#ifndef BOOST_ACCUMULATORS_STATISTICS_WEIGHTED_VARIANCE_HPP_EAN_28_10_2005\r\n#define BOOST_ACCUMULATORS_STATISTICS_WEIGHTED_VARIANCE_HPP_EAN_28_10_2005\r\n\r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n#include \r\n\r\nnamespace boost { namespace accumulators\r\n{\r\n\r\nnamespace impl\r\n{\r\n //! 
Lazy calculation of variance of weighted samples.\r\n /*!\r\n The default implementation of the variance of weighted samples is based on the second moment\r\n \\f$\\widehat{m}_n^{(2)}\\f$ (weighted_moment<2>) and the mean\\f$ \\hat{\\mu}_n\\f$ (weighted_mean):\r\n \\f[\r\n \\hat{\\sigma}_n^2 = \\widehat{m}_n^{(2)}-\\hat{\\mu}_n^2,\r\n \\f]\r\n where \\f$n\\f$ is the number of samples.\r\n */\r\n template\r\n struct lazy_weighted_variance_impl\r\n : accumulator_base\r\n {\r\n typedef typename numeric::functional::multiplies::result_type weighted_sample;\r\n // for boost::result_of\r\n typedef typename numeric::functional::average::result_type result_type;\r\n\r\n lazy_weighted_variance_impl(dont_care) {}\r\n\r\n template\r\n result_type result(Args const &args) const\r\n {\r\n extractor const some_mean = {};\r\n result_type tmp = some_mean(args);\r\n return weighted_moment<2>(args) - tmp * tmp;\r\n }\r\n };\r\n\r\n //! Iterative calculation of variance of weighted samples.\r\n /*!\r\n Iterative calculation of variance of weighted samples:\r\n \\f[\r\n \\hat{\\sigma}_n^2 =\r\n \\frac{\\bar{w}_n - w_n}{\\bar{w}_n}\\hat{\\sigma}_{n - 1}^2\r\n + \\frac{w_n}{\\bar{w}_n - w_n}\\left(X_n - \\hat{\\mu}_n\\right)^2\r\n ,\\quad n\\ge2,\\quad\\hat{\\sigma}_0^2 = 0.\r\n \\f]\r\n where \\f$\\bar{w}_n\\f$ is the sum of the \\f$n\\f$ weights \\f$w_i\\f$ and \\f$\\hat{\\mu}_n\\f$\r\n the estimate of the mean of the weighted smaples. 
Note that the sample variance is not defined for\r\n \\f$n <= 1\\f$.\r\n */\r\n template\r\n struct weighted_variance_impl\r\n : accumulator_base\r\n {\r\n typedef typename numeric::functional::multiplies::result_type weighted_sample;\r\n // for boost::result_of\r\n typedef typename numeric::functional::average::result_type result_type;\r\n\r\n template\r\n weighted_variance_impl(Args const &args)\r\n : weighted_variance(numeric::average(args[sample | Sample()], numeric::one::value))\r\n {\r\n }\r\n\r\n template\r\n void operator ()(Args const &args)\r\n {\r\n std::size_t cnt = count(args);\r\n\r\n if(cnt > 1)\r\n {\r\n extractor const some_mean = {};\r\n\r\n result_type tmp = args[parameter::keyword::get()] - some_mean(args);\r\n\r\n this->weighted_variance =\r\n numeric::average(this->weighted_variance * (sum_of_weights(args) - args[weight]), sum_of_weights(args))\r\n + numeric::average(tmp * tmp * args[weight], sum_of_weights(args) - args[weight] );\r\n }\r\n }\r\n\r\n result_type result(dont_care) const\r\n {\r\n return this->weighted_variance;\r\n }\r\n\r\n private:\r\n result_type weighted_variance;\r\n };\r\n\r\n} // namespace impl\r\n\r\n///////////////////////////////////////////////////////////////////////////////\r\n// tag::weighted_variance\r\n// tag::immediate_weighted_variance\r\n//\r\nnamespace tag\r\n{\r\n struct lazy_weighted_variance\r\n : depends_on, weighted_mean>\r\n {\r\n /// INTERNAL ONLY\r\n ///\r\n typedef accumulators::impl::lazy_weighted_variance_impl impl;\r\n };\r\n\r\n struct weighted_variance\r\n : depends_on\r\n {\r\n /// INTERNAL ONLY\r\n ///\r\n typedef accumulators::impl::weighted_variance_impl impl;\r\n };\r\n}\r\n\r\n///////////////////////////////////////////////////////////////////////////////\r\n// extract::weighted_variance\r\n// extract::immediate_weighted_variance\r\n//\r\nnamespace extract\r\n{\r\n extractor const lazy_weighted_variance = {};\r\n extractor const weighted_variance = {};\r\n}\r\n\r\nusing 
extract::lazy_weighted_variance;\r\nusing extract::weighted_variance;\r\n\r\n// weighted_variance(lazy) -> lazy_weighted_variance\r\ntemplate<>\r\nstruct as_feature\r\n{\r\n typedef tag::lazy_weighted_variance type;\r\n};\r\n\r\n// weighted_variance(immediate) -> weighted_variance\r\ntemplate<>\r\nstruct as_feature\r\n{\r\n typedef tag::weighted_variance type;\r\n};\r\n\r\n////////////////////////////////////////////////////////////////////////////\r\n//// droppable_accumulator\r\n//// need to specialize droppable lazy weighted_variance to cache the result at the\r\n//// point the accumulator is dropped.\r\n///// INTERNAL ONLY\r\n/////\r\n//template\r\n//struct droppable_accumulator >\r\n// : droppable_accumulator_base<\r\n// with_cached_result >\r\n// >\r\n//{\r\n// template\r\n// droppable_accumulator(Args const &args)\r\n// : droppable_accumulator::base(args)\r\n// {\r\n// }\r\n//};\r\n\r\n}} // namespace boost::accumulators\r\n\r\n#endif\r\n", "meta": {"hexsha": "41b4fb286266c68ef91185bd0343b24dd9099c28", "size": 6634, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "windows/include/boost/accumulators/statistics/weighted_variance.hpp", "max_stars_repo_name": "jaredhoberock/gotham", "max_stars_repo_head_hexsha": "e3551cc355646530574d086d7cc2b82e41e8f798", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2015-12-29T07:21:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-29T10:47:38.000Z", "max_issues_repo_path": "windows/include/boost/accumulators/statistics/weighted_variance.hpp", "max_issues_repo_name": "jaredhoberock/gotham", "max_issues_repo_head_hexsha": "e3551cc355646530574d086d7cc2b82e41e8f798", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "windows/include/boost/accumulators/statistics/weighted_variance.hpp", "max_forks_repo_name": 
"jaredhoberock/gotham", "max_forks_repo_head_hexsha": "e3551cc355646530574d086d7cc2b82e41e8f798", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0543478261, "max_line_length": 124, "alphanum_fraction": 0.6276756105, "num_tokens": 1461, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8104789086703224, "lm_q2_score": 0.6150878555160665, "lm_q1q2_score": 0.49851573387503045}} {"text": "#ifndef QUADEIGS_H_\n#define QUADEIGS_H_\n\n#include \n#include \n\nclass QuadEigs {\npublic:\n QuadEigs(const Eigen::Ref &matM,\n const Eigen::Ref &matD,\n const Eigen::Ref &matK);\n\n Eigen::VectorXcd eigenvalues(int m);\n\nprivate:\n const int ndim_;\n Eigen::MatrixXd matM_, matD_, matK_;\n Eigen::MatrixXd matA_, matB_;\n};\n\n#endif", "meta": {"hexsha": "315abbe05580daf371d7cab0b566f5641d26961e", "size": 433, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/quadeigs.hpp", "max_stars_repo_name": "pan3rock/QuadEigsSOAR", "max_stars_repo_head_hexsha": "6b4a2e939c8987773cd7990f665e9ebf57ecdbde", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/quadeigs.hpp", "max_issues_repo_name": "pan3rock/QuadEigsSOAR", "max_issues_repo_head_hexsha": "6b4a2e939c8987773cd7990f665e9ebf57ecdbde", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/quadeigs.hpp", "max_forks_repo_name": "pan3rock/QuadEigsSOAR", "max_forks_repo_head_hexsha": "6b4a2e939c8987773cd7990f665e9ebf57ecdbde", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, 
"avg_line_length": 20.619047619, "max_line_length": 58, "alphanum_fraction": 0.6951501155, "num_tokens": 122, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8104788903594354, "lm_q2_score": 0.6150878555160665, "lm_q1q2_score": 0.4985157226122263}} {"text": "#include \r\n#include \r\n\r\nusing namespace Rcpp;\r\n\r\n// [[Rcpp::plugins(\"cpp11\")]]\r\n// [[Rcpp::depends(BH)]]\r\n\r\n//'Estep in girt.\r\n//'\r\n//'@param xall Item response matrix.\r\n//'@param t0 item parameter vector\r\n//'@param Xm node of theta dist.\r\n//'@param Wm weight of theta dist.\r\n//'@param group a vector.\r\n//'@param ind a design matrix for group.\r\n//'@param resp a design matrix for person.\r\n//'@param D factor constant.\r\n//'@param MLL a vector\r\n//'@export\r\n// [[Rcpp::export]]\r\n\r\nList Estep_irt(\r\n IntegerMatrix xall,\r\n NumericMatrix t0, // a, b, c\r\n NumericVector Xm,\r\n NumericMatrix Wm,\r\n IntegerVector group,\r\n IntegerMatrix ind, // design matrix\r\n IntegerMatrix resp, // design matrix\r\n double D,\r\n NumericVector MLL\r\n){\r\n\r\n int nj = xall.ncol();// item n\r\n int ni = xall.nrow(); // subject n\r\n int N = Xm.length(); // node n\r\n int ng = max(group); // group n\r\n\r\n double a,b,c,t,tt,u,x;\r\n int g,i,m,j;\r\n\r\n boost::multi_array Lim (boost::extents[ng][ni][N]);\r\n boost::multi_array Gim (boost::extents[ng][ni][N]);\r\n boost::multi_array Njm (boost::extents[ng][nj][N]);\r\n boost::multi_array rjm (boost::extents[ng][nj][N]);\r\n\r\n for(g=0; g(f)){\r\n // \u5bfe\u6570\u5c24\u5ea6\u306e\u8a08\u7b97\u306b\u5931\u6557\u3057\u305f\u3089\uff0c\u8a08\u7b97\u3092\u4e2d\u6b62\u3059\u308b\u3002\r\n stop(\"Can't calculate marginal log likelihood.\");\r\n }\r\n\r\n //Rcout<<\"expected frequency of subjects in each nodes calculation.\\n\";\r\n double k,kk;\r\n for(int g=0; g\n\n#include \n#include \n\n#include \n\n#include \n#include \n\n#include \n#include \n\n#if defined DPLX_COMP_MSVC_AVAILABLE\n#include 
\n#endif\n\nnamespace dplx::dp::detail\n{\n\ntemplate \nconstexpr auto find_last_set_bit(T value) noexcept -> int\n{\n static_assert(std::is_integral_v);\n static_assert(std::is_unsigned_v);\n static_assert(sizeof(T) <= sizeof(unsigned long long));\n\n // #LangODR to be resolved after MSVC supports __cpp_lib_bitops\n#if __cpp_lib_bitops >= 201907L && __cpp_lib_is_constant_evaluated >= 201811L\n\n if (std::is_constant_evaluated())\n {\n return (digits_v - 1) - std::countl_zero(value);\n }\n\n#endif\n\n#if defined(DPLX_COMP_GCC_AVAILABLE) || defined(DPLX_COMP_CLANG_AVAILABLE)\n\n if constexpr (sizeof(T) <= sizeof(unsigned int))\n {\n return (digits_v - 1)\n ^ __builtin_clz(static_cast(value));\n }\n else if constexpr (sizeof(T) <= sizeof(unsigned long))\n {\n return (digits_v - 1)\n ^ __builtin_clzl(static_cast(value));\n }\n else /*if constexpr (sizeof(T) <= sizeof(unsigned long long))\n see static_assert above */\n {\n return (digits_v - 1)\n ^ __builtin_clzll(static_cast(value));\n }\n\n#elif defined(DPLX_COMP_MSVC_AVAILABLE)\n\n unsigned long result;\n if constexpr (sizeof(T) <= sizeof(unsigned long))\n {\n _BitScanReverse(&result, static_cast(value));\n return static_cast(result);\n }\n else if constexpr (sizeof(T) <= sizeof(unsigned long long))\n {\n#if defined(_M_ARM64) || defined(_M_AMD64)\n\n _BitScanReverse64(&result, static_cast(value));\n return static_cast(result);\n\n#else\n\n static_assert(sizeof(unsigned long) * 2 == sizeof(unsigned long long));\n\n if (_BitScanReverse(&result, static_cast(\n value >> digits_v)))\n {\n return static_cast(result + digits_v);\n }\n else\n {\n _BitScanReverse(&result, static_cast(value));\n return static_cast(result);\n }\n#endif\n }\n\n#else\n\n return (digits_v - 1) ^ std::countl_zero(value);\n\n#endif\n}\n\ntemplate \nconstexpr auto rotl(T const v, int n) noexcept -> T\n{\n return (v << n) | (v >> (digits_v - n));\n}\n\ntemplate \nconstexpr auto load(std::byte const *const src) -> T\n{\n // static_assert(order == 
std::endian::native);\n static_assert(order == std::endian::big || order == std::endian::little);\n\n if (std::is_constant_evaluated())\n {\n static_assert(sizeof(T) <= 8);\n using uT = std::make_unsigned_t;\n if constexpr (order == std::endian::little)\n {\n uT acc = std::to_integer(src[0]);\n if constexpr (sizeof(acc) >= 2)\n {\n acc |= std::to_integer(src[1]) << 8;\n }\n if constexpr (sizeof(acc) >= 4)\n {\n acc |= std::to_integer(src[2]) << 16\n | std::to_integer(src[3]) << 24;\n }\n if constexpr (sizeof(acc) == 8)\n {\n acc |= std::to_integer(src[4]) << 32\n | std::to_integer(src[5]) << 40\n | std::to_integer(src[6]) << 48\n | std::to_integer(src[7]) << 56;\n }\n return static_cast(acc);\n }\n else\n {\n uT acc = std::to_integer(src[0]) << 56;\n if constexpr (sizeof(acc) >= 2)\n {\n acc |= std::to_integer(src[1]) << 48;\n }\n if constexpr (sizeof(acc) >= 4)\n {\n acc |= std::to_integer(src[2]) << 40\n | std::to_integer(src[3]) << 32;\n }\n if constexpr (sizeof(acc) == 8)\n {\n acc |= std::to_integer(src[4]) << 24\n | std::to_integer(src[5]) << 16\n | std::to_integer(src[6]) << 8\n | std::to_integer(src[7]);\n }\n return static_cast(acc >> (64 - digits_v));\n }\n }\n else\n {\n constexpr auto boostOrder = order == std::endian::little\n ? 
boost::endian::order::little\n : boost::endian::order::big;\n return boost::endian::endian_load(\n reinterpret_cast(src));\n // T assembled;\n // std::memcpy(&assembled, src, sizeof(assembled));\n // return assembled;\n }\n}\n\ntemplate \nconstexpr auto load_partial(const std::byte *data, int num) -> T\n{\n static_assert(sizeof(T) <= 8);\n static_assert(order == std::endian::big || order == std::endian::little);\n\n using uT = std::make_unsigned_t;\n if constexpr (order == std::endian::little)\n {\n uT assembled = 0;\n switch (num)\n {\n case 7:\n if constexpr (sizeof(assembled) == 8)\n {\n assembled |= std::to_integer(data[6]) << 48;\n }\n [[fallthrough]];\n case 6:\n if constexpr (sizeof(assembled) == 8)\n {\n assembled |= std::to_integer(data[5]) << 40;\n }\n [[fallthrough]];\n case 5:\n if constexpr (sizeof(assembled) == 8)\n {\n assembled |= std::to_integer(data[4]) << 32;\n }\n [[fallthrough]];\n\n case 4:\n if constexpr (sizeof(assembled) >= 4)\n {\n assembled |= std::to_integer(data[3]) << 24;\n }\n [[fallthrough]];\n case 3:\n if constexpr (sizeof(assembled) >= 4)\n {\n assembled |= std::to_integer(data[2]) << 16;\n }\n [[fallthrough]];\n\n case 2:\n if constexpr (sizeof(assembled) >= 2)\n {\n assembled |= std::to_integer(data[1]) << 8;\n }\n [[fallthrough]];\n\n case 1:\n assembled |= std::to_integer(data[0]);\n [[fallthrough]];\n\n case 0:\n break;\n }\n return assembled;\n }\n else\n {\n }\n} // namespace dplx::dp::detail\n\nconstexpr auto byte_swap_u32(std::uint32_t const x) noexcept -> std::uint32_t\n{\n if (std::is_constant_evaluated())\n {\n // byte_swap adapted from Boost.Endian\n // Copyright 2019 Peter Dimov\n //\n // Distributed under the Boost Software License, Version 1.0.\n // http://www.boost.org/LICENSE_1_0.txt\n //\n // -- portable approach suggested by tymofey, with avoidance of\n // undefined behavior as suggested by Giovanni Piero Deretta,\n // with a further refinement suggested by Pyry Jahkola.\n\n std::uint32_t const step16 = x << 16 
| x >> 16;\n return ((step16 << 8) & 0xff00ff00) | ((step16 >> 8) & 0x00ff00ff);\n }\n else\n {\n return boost::endian::endian_reverse(x);\n }\n}\n\n} // namespace dplx::dp::detail\n", "meta": {"hexsha": "b1952fb1ede6e5425ef286d6b4d2196d767a517b", "size": 7852, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/dplx/dp/detail/bit.hpp", "max_stars_repo_name": "deeplex/deeppack", "max_stars_repo_head_hexsha": "0a94f61ee441eba7f0b280d2493505860c5b89cd", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/dplx/dp/detail/bit.hpp", "max_issues_repo_name": "deeplex/deeppack", "max_issues_repo_head_hexsha": "0a94f61ee441eba7f0b280d2493505860c5b89cd", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/dplx/dp/detail/bit.hpp", "max_forks_repo_name": "deeplex/deeppack", "max_forks_repo_head_hexsha": "0a94f61ee441eba7f0b280d2493505860c5b89cd", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5187969925, "max_line_length": 80, "alphanum_fraction": 0.5323484463, "num_tokens": 1963, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8056321889812553, "lm_q2_score": 0.6187804337438501, "lm_q1q2_score": 0.49850943533582853}} {"text": "#ifndef KMEANS_HPP_\n#define KMEANS_HPP_\n\n#include \n#include \n// #include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n\nclass Kmeans\n{\n\ttypedef CGAL::Simple_cartesian K;\n\ttypedef K::Point_3 Point_3;\n typedef CGAL::Point_set_3 Point_set;\n\npublic:\n\tKmeans();\n\t~Kmeans() {}\n\tKmeans(std::string file);\n\t/**\n\t * @brief Set the Input object\n\t * \n\t * @param source_path \n\t */\n\tbool setInput(std::string source);\n\n\tvoid setSimilarityModel(int flag);\n\t\n\t/**\n\t * @brief Set the Numberof Clusters and select init centers indices\n\t * \n\t * @param number_of_clusters the number of clusters\n\t */\n\tvoid setNumberofClusters(int number_of_clusters);\n\t/**\n\t * @brief Set the Init Cluster Center object\n\t * \n\t * @param init_centers the indices of init_centers point, if you should assign the init point,\n\t * \t\t\t\t\t you should use this function\n\t */\n\tvoid setInitClusterCenter(const std::vector init_centers);\n\n\t/**\n\t * @brief Set the Max Iterations\n\t * \n\t * @param maxiter the max number of iteration\n\t */\n\tvoid setMaxIterations(int maxiter) {\n\t\tmaxiter_ = maxiter;\n\t};\n\n\t/**\n\t * @brief Set the Max Correspondence Distance \n\t * \n\t * @param maxcor \n\t */\n\tvoid setMaxCorrespondenceDistance(float maxcor) {\n\t\tmaxcor_ = maxcor;\n\t};\n\n\t/**\n\t * @brief execute kmean\n\t * \n\t * @return true \n\t * @return false \n\t */\n\tbool compute();\n\n\t/**\n\t * @brief get result\n\t * \n\t */\n\tbool save_clusters(std::string path, int minsize);\n\nprivate:\n\t/**\n\t * @brief input point cloud\n\t */\n\tPoint_set points_;\n\n\t/**\n\t * @brief the indices of point which is center\n\t * \n\t */\n\t// std::vector centers_indices_;\n\tstd::vector centers_points_;\n\n\t/**\n\t * @brief indices of each clusters\n\t * 
\n\t */\n\tstd::vector> clusters_indices_;\n\n\t/**\n\t * @brief assign centers\n\t * \n\t */\n\tvoid assign_centers();\n\n\t/**\n\t * @brief compute threshold between centers and cluster center\n\t * \n\t * @return true meet the requirement\n\t * @return false \n\t */\n\tbool compute_threshold();\n\n\t/**\n\t * @brief some parameters\n\t * \n\t */\n\tint maxiter_;\n\tint finaliter_;\n\tfloat maxcor_;\n\tint number_of_clusters_;\n};\n\nKmeans::Kmeans()\n{\n\tLOG(INFO) << \"construct kmeans ...\";\n\tfinaliter_ = 0;\n\tmaxiter_ = 100;\n\tmaxcor_ = 10.0;\n\tnumber_of_clusters_ = 10;\n\tclusters_indices_.resize(number_of_clusters_);\n}\n\nKmeans::Kmeans(std::string file)\n{\n\tLOG(INFO) << \"construct kmeans ...\";\n\tsetInput(file);\n\n\tfinaliter_ = 0;\n\tmaxiter_ = 100;\n\tmaxcor_ = 1.0;\n\tnumber_of_clusters_ = 10;\n\tclusters_indices_.resize(number_of_clusters_);\n}\n\nbool Kmeans::setInput(std::string source)\n{\n\tLOG(INFO) << \"read data from file path\";\n\tCGAL::IO::read_XYZ(source, points_);\n\tif (points_.size() == 0) {\n\t\tLOG(INFO) << \"input data is zero\";\n\t\treturn false;\n\t}\n\treturn true;\n}\n\nvoid Kmeans::setNumberofClusters(int number_of_clusters)\n{\n\tLOG(INFO) << \"set number of clusters\";\n\tnumber_of_clusters_ = number_of_clusters;\n centers_points_.resize(number_of_clusters_);\n\n\tsrand((int)time(0));\n\t// selsect number_of_clusters different points\n\tfor (int i = 0; i < number_of_clusters_; i++) {\n\t\tcenters_points_[i] = points_.point(rand() % points_.size());\n\t}\n}\n\nvoid Kmeans::setInitClusterCenter(const std::vector init_centers)\n{\n\tnumber_of_clusters_ = init_centers.size();\n centers_points_.resize(number_of_clusters_);\n\tfor (int i = 0; i < init_centers.size(); ++i)\n\t\tcenters_points_[i] = init_centers[i];\n}\n\nbool Kmeans::compute_threshold() {\n\t// get points from points_\n\tLOG(INFO) << \"computer center and updata centers\";\n\tbool isUpdate = false;\n\tfloat x = 0, y = 0, z = 0;\n\t// Point_set 
temp_points;\n\tstd::vector temp_points;\n\n\tfor (int i = 0; i < clusters_indices_.size(); i++) {\n\t\tPoint_3 center = centers_points_[i];\n\t\tfor (int j = 0; j < clusters_indices_[i].size(); j++) {\n\t\t\tPoint_3 p = points_.point(j);\n\t\t\ttemp_points.push_back(p);\n\t\t}\n\t\tPoint_3 temp_centroid = CGAL::centroid(temp_points.begin(),\n\t\t\t\t\t\t\t\t\t\t\t temp_points.end(),\n\t\t\t\t\t\t\t\t\t\t\t CGAL::Dimension_tag<0>());\n\n\t\tfloat temp = CGAL::squared_distance(center, temp_centroid);\n\t\tif (temp > maxcor_) {\n\t\t\tcenters_points_[i] = Point_3(x, y, z);\n\t\t\tisUpdate = true;\n\t\t}\n\t}\n\tLOG(INFO) << \"compute_threshold && update center successful\";\n\treturn isUpdate;\n}\n\nvoid Kmeans::assign_centers() {\n\tLOG(INFO) << \"assign points to target centers\";\n\tfor (int i = 0; i < points_.size(); ++i) {\n\t\tfloat min_distance = std::numeric_limits::max();\n\t\tint min_dis_clusters = -1;\n\t\tPoint_3 temp_p = points_.point(i);\n\t\tfor (int j = 0; j < number_of_clusters_; ++j) {\n\t\t\tfloat temp = CGAL::squared_distance(temp_p, centers_points_[j]);\n\t\t\tif (temp < min_distance) {\n\t\t\t\tmin_dis_clusters = j;\n\t\t\t\tmin_distance = temp;\n\t\t\t}\n\t\t}\n\t\tclusters_indices_[min_dis_clusters].push_back(i);\n\t}\n\tLOG(INFO) << \"assign_centers successful\";\n}\n\nbool Kmeans::compute() {\n\tLOG(INFO) << \"compute ... 
\";\n\tbool isUpdate = true;\n\tint iter = 0;\n\twhile (isUpdate) {\n\t\tassign_centers();\n\t\tisUpdate = compute_threshold();\n\t\titer ++;\n\t\tif (iter == maxiter_)\n\t\t\tbreak;\n\t}\n\tfinaliter_ = iter;\n LOG(INFO) << \"compute successful \";\n\n\treturn true;\n}\n\n// @TODOS add\nbool Kmeans::save_clusters(std::string path, int minsize = std::numeric_limits::min()) {\n std::string output_file = \"\";\n for (int i = 0; i < clusters_indices_.size(); ++i) {\n output_file = path + \"/\" + std::to_string(i) + \".asc\";\n LOG(INFO) << \"output_file = \" << output_file;\n auto temp = clusters_indices_[i];\n if (temp.size() > minsize) {\n std::ofstream out(output_file);\n for (int j = 0; j < temp.size(); j++)\n out << points_.point(temp[j]).x() << \" \" \n\t\t\t\t\t<< points_.point(temp[j]).y() << \" \"\n\t\t\t\t\t<< points_.point(temp[j]).z() << std::endl;\n out.close();\n } \n }\n\n return true;\n}\n\n\n#endif\n", "meta": {"hexsha": "180547e1fa593a6e7b60478cd43346fec4ac2dba", "size": 6030, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "kmeans.hpp", "max_stars_repo_name": "GreenAvocado92/Kmeans", "max_stars_repo_head_hexsha": "4a0f1f378f112c43c09614f1c38c1e3f53c14c2f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kmeans.hpp", "max_issues_repo_name": "GreenAvocado92/Kmeans", "max_issues_repo_head_hexsha": "4a0f1f378f112c43c09614f1c38c1e3f53c14c2f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kmeans.hpp", "max_forks_repo_name": "GreenAvocado92/Kmeans", "max_forks_repo_head_hexsha": "4a0f1f378f112c43c09614f1c38c1e3f53c14c2f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, 
"max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.5842696629, "max_line_length": 102, "alphanum_fraction": 0.6538971808, "num_tokens": 1679, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8056321889812553, "lm_q2_score": 0.6187804337438501, "lm_q1q2_score": 0.49850943533582853}} {"text": "#ifndef PYTHONIC_INCLUDE_NUMPY_EXPM1_HPP\n#define PYTHONIC_INCLUDE_NUMPY_EXPM1_HPP\n\n#include \"pythonic/include/utils/functor.hpp\"\n#include \"pythonic/include/types/ndarray.hpp\"\n#include \"pythonic/include/utils/numpy_traits.hpp\"\n\n#include \n\nPYTHONIC_NS_BEGIN\n\nnamespace numpy\n{\n\n namespace wrapper\n {\n template \n std::complex expm1(std::complex const &val)\n {\n return exp(val) - 1;\n }\n template \n auto expm1(T const &val) -> decltype(boost::simd::expm1(val))\n {\n return boost::simd::expm1(val);\n }\n }\n\n#define NUMPY_NARY_FUNC_NAME expm1\n#define NUMPY_NARY_FUNC_SYM wrapper::expm1\n#include \"pythonic/include/types/numpy_nary_expr.hpp\"\n}\nPYTHONIC_NS_END\n\n#endif\n", "meta": {"hexsha": "4fa853c8139cacaf4e34ec827f308ef06b586bdb", "size": 749, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "pythran/pythonic/include/numpy/expm1.hpp", "max_stars_repo_name": "SylvainCorlay/pythran", "max_stars_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2018-03-24T00:33:03.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-24T00:33:03.000Z", "max_issues_repo_path": "pythran/pythonic/include/numpy/expm1.hpp", "max_issues_repo_name": "SylvainCorlay/pythran", "max_issues_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pythran/pythonic/include/numpy/expm1.hpp", 
"max_forks_repo_name": "SylvainCorlay/pythran", "max_forks_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.8055555556, "max_line_length": 65, "alphanum_fraction": 0.7289719626, "num_tokens": 207, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8056321796478254, "lm_q2_score": 0.6187804337438501, "lm_q1q2_score": 0.4985094295604847}} {"text": "#pragma once\n#include \n#include \n#include \n\n// Provides a number of classes that encapsulate non-uniform probability\n// distributions (Normal, Poisson, etc).\nclass PoissonDistribution\n{\n public:\n PoissonDistribution(const int mean);\n\n int next();\n\n private:\n boost::mt19937 mt;\n boost::poisson_distribution pdist;\n boost::variate_generator> poisson_generator;\n};\n\n", "meta": {"hexsha": "7e2591090c898a2dfb8cc4841c0b10c35c18b336", "size": 545, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "engine/include/Random.hpp", "max_stars_repo_name": "sidav/shadow-of-the-wyrm", "max_stars_repo_head_hexsha": "747afdeebed885b1a4f7ab42f04f9f756afd3e52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 60.0, "max_stars_repo_stars_event_min_datetime": "2019-08-21T04:08:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T13:48:04.000Z", "max_issues_repo_path": "engine/include/Random.hpp", "max_issues_repo_name": "cleancoindev/shadow-of-the-wyrm", "max_issues_repo_head_hexsha": "51b23e98285ecb8336324bfd41ebf00f67b30389", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2021-03-18T15:11:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-20T12:13:07.000Z", "max_forks_repo_path": "engine/include/Random.hpp", "max_forks_repo_name": "cleancoindev/shadow-of-the-wyrm", "max_forks_repo_head_hexsha": 
"51b23e98285ecb8336324bfd41ebf00f67b30389", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2019-11-16T06:29:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-23T17:33:43.000Z", "avg_line_length": 25.9523809524, "max_line_length": 97, "alphanum_fraction": 0.7633027523, "num_tokens": 124, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES\n\n", "lm_q1_score": 0.8056321703143954, "lm_q2_score": 0.6187804337438501, "lm_q1q2_score": 0.49850942378514085}} {"text": "// Boost.Geometry\n// Unit Test\n\n// Copyright (c) 2016 Oracle and/or its affiliates.\n\n// Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle\n\n// Use, modification and distribution is subject to the Boost Software License,\n// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#include \"test_formula.hpp\"\n#include \"intersection_cases.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nvoid check_inverse(expected_result const& result, expected_result const& expected, expected_result const& reference, double reference_error)\n{\n check_one(result.lon, expected.lon, reference.lon, reference_error);\n check_one(result.lat, expected.lat, reference.lat, reference_error);\n}\n\nvoid test_all(expected_results const& results)\n{\n double const d2r = bg::math::d2r();\n double const r2d = bg::math::r2d();\n\n double lona1r = results.p1.lon * d2r;\n double lata1r = results.p1.lat * d2r;\n double lona2r = results.p2.lon * d2r;\n double lata2r = results.p2.lat * d2r;\n double lonb1r = results.q1.lon * d2r;\n double latb1r = results.q1.lat * d2r;\n double lonb2r = results.q2.lon * d2r;\n double latb2r = results.q2.lat * d2r;\n\n expected_result result;\n\n // WGS84\n bg::srs::spheroid spheroid(6378137.0, 6356752.3142451793);\n\n bg::formula::gnomonic_intersection\n ::apply(lona1r, lata1r, lona2r, lata2r, lonb1r, latb1r, lonb2r, latb2r, 
result.lon, result.lat, spheroid);\n result.lon *= r2d;\n result.lat *= r2d;\n check_inverse(result, results.gnomonic_vincenty, results.gnomonic_karney, 0.00000001);\n\n bg::formula::gnomonic_intersection\n ::apply(lona1r, lata1r, lona2r, lata2r, lonb1r, latb1r, lonb2r, latb2r, result.lon, result.lat, spheroid);\n result.lon *= r2d;\n result.lat *= r2d;\n check_inverse(result, results.gnomonic_thomas, results.gnomonic_karney, 0.0000001);\n\n bg::formula::sjoberg_intersection\n ::apply(lona1r, lata1r, lona2r, lata2r, lonb1r, latb1r, lonb2r, latb2r, result.lon, result.lat, spheroid);\n result.lon *= r2d;\n result.lat *= r2d;\n check_inverse(result, results.sjoberg_vincenty, results.sjoberg_karney, 0.00000001);\n\n bg::formula::sjoberg_intersection\n ::apply(lona1r, lata1r, lona2r, lata2r, lonb1r, latb1r, lonb2r, latb2r, result.lon, result.lat, spheroid);\n result.lon *= r2d;\n result.lat *= r2d;\n check_inverse(result, results.sjoberg_thomas, results.sjoberg_karney, 0.0000001);\n\n bg::formula::sjoberg_intersection\n ::apply(lona1r, lata1r, lona2r, lata2r, lonb1r, latb1r, lonb2r, latb2r, result.lon, result.lat, spheroid);\n result.lon *= r2d;\n result.lat *= r2d;\n check_inverse(result, results.sjoberg_andoyer, results.sjoberg_karney, 0.0001);\n}\n\nint test_main(int, char*[])\n{\n for (size_t i = 0; i < expected_size; ++i)\n {\n test_all(expected[i]);\n }\n\n return 0;\n}\n", "meta": {"hexsha": "f0b1b49d7db8b5c6c6299f5fd2d5c92d7ef85158", "size": 3528, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/geometry/test/formulas/intersection.cpp", "max_stars_repo_name": "metux/boost", "max_stars_repo_head_hexsha": "e0157afdd519a2b14356cea62fcdac81829324cc", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2015-06-01T15:17:22.000Z", "max_stars_repo_stars_event_max_datetime": "2015-06-01T16:06:53.000Z", "max_issues_repo_path": "libs/geometry/test/formulas/intersection.cpp", "max_issues_repo_name": 
"metux/boost", "max_issues_repo_head_hexsha": "e0157afdd519a2b14356cea62fcdac81829324cc", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 28.0, "max_issues_repo_issues_event_min_datetime": "2016-10-16T19:42:37.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-14T21:29:48.000Z", "max_forks_repo_path": "libs/geometry/test/formulas/intersection.cpp", "max_forks_repo_name": "metux/boost", "max_forks_repo_head_hexsha": "e0157afdd519a2b14356cea62fcdac81829324cc", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2017-03-19T07:18:18.000Z", "max_forks_repo_forks_event_max_datetime": "2017-03-19T07:18:18.000Z", "avg_line_length": 40.0909090909, "max_line_length": 140, "alphanum_fraction": 0.7219387755, "num_tokens": 1117, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8519528019683106, "lm_q2_score": 0.5851011542032312, "lm_q1q2_score": 0.4984785677583354}} {"text": "//---------------------------------------------------------------------------//\n// Copyright (c) 2020 Mikhail Komarov \n//\n// Distributed under the Boost Software License, Version 1.0\n// See accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt\n//---------------------------------------------------------------------------//\n\n#ifndef BOOST_MULTIPRECISION_MODULAR_BACKENDS_INVERSE_HPP\n#define BOOST_MULTIPRECISION_MODULAR_BACKENDS_INVERSE_HPP\n\n#include \n#include \n\n#include \n\n#include \n#include \n\nnamespace nil {\n namespace crypto3 {\n namespace multiprecision {\n namespace backends {\n\n template\n constexpr Backend eval_extended_euclidean_algorithm(Backend& a, Backend& b, Backend& x, Backend& y) {\n if (eval_is_zero(a)) {\n using ui_type = typename std::tuple_element<0, typename Backend::unsigned_types>::type;\n x = ui_type(0u);\n y = ui_type(1u);\n return b;\n }\n Backend x1, y1, tmp = b;\n eval_modulus(tmp, a);\n Backend d = 
eval_extended_euclidean_algorithm(tmp, a, x1, y1);\n tmp = b;\n eval_divide(tmp, a);\n eval_multiply(tmp, x1);\n x = y1;\n eval_subtract(x, tmp);\n y = x1;\n return d;\n }\n\n template\n constexpr Backend eval_inverse_extended_euclidean_algorithm(const Backend& a, const Backend& m) {\n using Backend_doubled = typename default_ops::double_precision_type::type;\n\n Backend aa = a, mm = m, x, y, g;\n using ui_type = typename std::tuple_element<0, typename Backend::unsigned_types>::type;\n g = eval_extended_euclidean_algorithm(aa, mm, x, y);\n if (!eval_eq(g, ui_type(1u))) {\n // BOOST_THROW_EXCEPTION(std::invalid_argument(\"eval_inverse_with_gcd: no inverse element\"));\n return ui_type(0u);\n } else {\n eval_modulus(x, m);\n Backend_doubled tmp(x);\n eval_add(tmp, m);\n eval_modulus(tmp, m);\n return static_cast(tmp);\n }\n }\n\n template\n constexpr typename std::tuple_element<0, typename Backend::signed_types>::type\n eval_monty_inverse(typename std::tuple_element<0, typename Backend::signed_types>::type a) {\n using si_type = typename std::tuple_element<0, typename Backend::signed_types>::type;\n\n if (a % 2 == 0) {\n throw std::invalid_argument(\"monty_inverse only valid for odd integers\");\n }\n\n /*\n * From \"A New Algorithm for Inversion mod p^k\" by \u00c7etin Kaya Ko\u00e7\n * https://eprint.iacr.org/2017/411.pdf sections 5 and 7.\n */\n\n si_type b = 1;\n si_type r = 0;\n\n for (size_t i = 0; i != sizeof(si_type) * CHAR_BIT; ++i) {\n const si_type bi = b % 2;\n r >>= 1;\n r += bi << (sizeof(si_type) * CHAR_BIT - 1);\n\n b -= a * bi;\n b >>= 1;\n }\n\n // Now invert in addition space\n r = (~static_cast(0) - r) + 1;\n\n return r;\n }\n\n template\n constexpr void eval_monty_inverse(Backend& res, const Backend& a, const Backend& p, const Backend& k) {\n\n using default_ops::eval_abs;\n using default_ops::eval_gt;\n using default_ops::eval_modulus;\n using default_ops::eval_subtract;\n\n using ui_type = typename std::tuple_element<0, typename 
Backend::unsigned_types>::type;\n Backend zero = ui_type(0u);\n Backend one = ui_type(1u);\n Backend two = ui_type(2u);\n\n /*\n * From \"A New Algorithm for Inversion mod p^k\" by \u00c7etin Kaya Ko\u00e7\n * https://eprint.iacr.org/2017/411.pdf sections 5 and 7.\n */\n Backend c, tmp;\n\n // a^(-1) mod p:\n c = eval_inverse_extended_euclidean_algorithm(a, p);\n\n Backend bi = one, bt, i = zero, k_negone = k, xi, nextp = one;\n eval_subtract(k_negone, one);\n res = zero;\n\n // ui_type kn = cpp_int(k_negone);\n\n while (!eval_eq(i, k)) {\n // xi:\n xi = bi;\n eval_multiply(xi, c);\n eval_modulus(xi, p);\n\n if (eval_get_sign(xi) < 0) {\n tmp = xi;\n eval_abs(tmp, tmp);\n eval_modulus(tmp, p);\n xi = p;\n eval_subtract(xi, tmp);\n }\n\n // bi:\n tmp = a;\n eval_multiply(tmp, xi);\n eval_subtract(bi, tmp);\n eval_divide(bi, p);\n\n // res:\n tmp = xi;\n eval_multiply(tmp, nextp);\n eval_multiply(nextp, p);\n eval_add(res, tmp);\n eval_add(i, one);\n }\n }\n\n /*\n template \n inline void bigint_shr1(typename boost::mpl::front::type x[], size_t x_size, size_t word_shift, size_t bit_shift)\n {\n typedef typename boost::mpl::front::type ui_type;\n\n const size_t top = x_size >= word_shift ? 
(x_size - word_shift) : 0;\n\n if (top > 0)\n copy_mem(x, x + word_shift, top);\n clear_mem(x + top, std::min(word_shift, x_size));\n\n const auto carry_mask = CT::Mask::expand(bit_shift);\n const size_t carry_shift = carry_mask.if_set_return(BOTAN_MP_WORD_BITS - bit_shift);\n\n ui_type carry = 0;\n\n for (size_t i = 0; i != top; ++i)\n {\n const ui_type w = x[top - i - 1];\n x[top - i - 1] = (w >> bit_shift) | carry;\n carry = carry_mask.if_set_return(w << carry_shift);\n }\n }\n\n template \n inline typename boost::mpl::front::type\n bigint_add2_nc( typename boost::mpl::front::type x[], size_t\n x_size, const typename boost::mpl::front::type y[], size_t y_size)\n {\n typedef typename boost::mpl::front::type ui_type;\n\n ui_type carry = 0;\n\n BOOST_ASSERT_MSG(x_size >= y_size, \"Expected sizes\");\n\n const size_t blocks = y_size - (y_size % 8);\n\n for (size_t i = 0; i != blocks; i += 8)\n carry = word8_add2(x + i, y + i, carry);\n\n for (size_t i = blocks; i != y_size; ++i)\n x[i] = word_add(x[i], y[i], &carry);\n\n for (size_t i = y_size; i != x_size; ++i)\n x[i] = word_add(x[i], 0, &carry);\n\n return carry;\n }\n\n template \n inline typename boost::mpl::front::type\n bigint_cnd_sub( typename boost::mpl::front::type cnd, typename\n boost::mpl::front::type x[], size_t x_size, const typename\n boost::mpl::front::type y[], size_t y_size)\n {\n BOOST_ASSERT_MSG(x_size >= y_size, \"Expected sizes\");\n\n typedef typename boost::mpl::front::type ui_type;\n\n const auto mask = CT::Mask::expand(cnd);\n\n ui_type carry = 0;\n\n const size_t blocks = y_size - (y_size % 8);\n ui_type z[8] = {0};\n\n for (size_t i = 0; i != blocks; i += 8)\n {\n carry = word8_sub3(z, x + i, y + i, carry);\n mask.select_n(x + i, z, x + i, 8);\n }\n\n for (size_t i = blocks; i != y_size; ++i)\n {\n z[0] = word_sub(x[i], y[i], &carry);\n x[i] = mask.select(z[0], x[i]);\n }\n\n for (size_t i = y_size; i != x_size; ++i)\n {\n z[0] = word_sub(x[i], 0, &carry);\n x[i] = mask.select(z[0], 
x[i]);\n }\n\n return mask.if_set_return(carry);\n }\n\n template \n inline typename boost::mpl::front::type\n bigint_cnd_add( typename boost::mpl::front::type cnd,\n typename boost::mpl::front::type x[],\n typename boost::mpl::front::type x_size,\n const typename boost::mpl::front::type y[], size_t\n y_size)\n {\n BOTAN_ASSERT(x_size >= y_size, \"Expected sizes\");\n\n typedef typename boost::mpl::front::type ui_type;\n\n const auto mask = CT::Mask::expand(cnd);\n\n ui_type carry = 0;\n\n const size_t blocks = y_size - (y_size % 8);\n ui_type z[8] = {0};\n\n for (size_t i = 0; i != blocks; i += 8)\n {\n carry = word8_add3(z, x + i, y + i, carry);\n mask.select_n(x + i, z, x + i, 8);\n }\n\n for (size_t i = blocks; i != y_size; ++i)\n {\n z[0] = word_add(x[i], y[i], &carry);\n x[i] = mask.select(z[0], x[i]);\n }\n\n for (size_t i = y_size; i != x_size; ++i)\n {\n z[0] = word_add(x[i], 0, &carry);\n x[i] = mask.select(z[0], x[i]);\n }\n\n return mask.if_set_return(carry);\n }\n\n template \n inline void bigint_cnd_abs(typename boost::mpl::front::type cnd, typename boost::mpl::front::type x[], size_t size)\n {\n typedef typename boost::mpl::front::type ui_type;\n const auto mask =\n CT::Mask::expand(cnd);\n\n ui_type carry = mask.if_set_return(1);\n for (size_t i = 0; i != size; ++i)\n {\n const ui_type z = word_add(~x[i], 0, &carry);\n x[i] = mask.select(z, x[i]);\n }\n }\n\n template \n inline void bigint_cnd_swap(typename boost::mpl::front::type cnd, typename boost::mpl::front::type x[], typename boost::mpl::front::type y[], size_t size)\n {\n typedef typename boost::mpl::front::type ui_type;\n const auto mask =\n CT::Mask::expand(cnd);\n\n for (size_t i = 0; i != size; ++i)\n {\n const ui_type a = x[i];\n const ui_type b = y[i];\n x[i] = mask.select(b, a);\n y[i] = mask.select(a, b);\n }\n }\n\n\n template \n void eval_inverse_mod_odd_modulus(Backend& res, const Backend& n, const Backend& mod)\n {\n typedef typename boost::mpl::front::type si_type;\n typedef 
typename boost::mpl::front::type ui_type;\n\n // Caller should assure these preconditions:\n BOOST_ASSERT(eval_gt(n, 0));\n BOOST_ASSERT(eval_gt(mod, 0));\n BOOST_ASSERT(eval_lt(n, mod));\n BOOST_ASSERT(eval_ge(mod, 3) && eval_modulus(mod, 2) == 1);*/\n\n /*\n This uses a modular inversion algorithm designed by Niels M\u00f6ller\n and implemented in Nettle. The same algorithm was later also\n adapted to GMP in mpn_sec_invert.\n It can be easily implemented in a way that does not depend on\n secret branches or memory lookups, providing resistance against\n some forms of side channel attack.\n There is also a description of the algorithm in Appendix 5 of \"Fast\n Software Polynomial Multiplication on ARM Processors using the NEON Engine\"\n by Danilo C\u00e2mara, Conrado P. L. Gouv\u00eaa, Julio L\u00f3pez, and Ricardo\n Dahab in LNCS 8182\n https://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf\n Thanks to Niels for creating the algorithm, explaining some things\n about it, and the reference to the paper.\n */\n /*\n const size_t mod_words = mod.size();\n BOOST_ASSERT_MSG(mod_words > 0, \"Not empty\");\n\n std::vector tmp_mem(5 * mod_words);\n\n ui_type* v_w = &tmp_mem[0];\n ui_type* u_w = &tmp_mem[1 * mod_words];\n ui_type* b_w = &tmp_mem[2 * mod_words];\n ui_type* a_w = &tmp_mem[3 * mod_words];\n ui_type* mp1o2 = &tmp_mem[4 * mod_words];\n\n // ct::poison(tmp_mem.data(), tmp_mem.size());\n\n copy_mem(a_w, n.data(), std::min(n.size(), mod_words));\n copy_mem(b_w, mod.data(), std::min(mod.size(), mod_words));\n u_w[0] = 1;\n // v_w = 0\n\n // compute (mod + 1) / 2 which [because mod is odd] is equal to\n // (mod / 2) + 1\n copy_mem(mp1o2, mod.data(), std::min(mod.size(), mod_words));\n bigint_shr1(mp1o2, mod_words, 0, 1);\n ui_type carry = bigint_add2_nc(mp1o2, mod_words, u_w, 1);\n BOOST_ASSERT(carry == 0);\n\n // Only n.bits() + mod.bits() iterations are required, but avoid leaking the size of\n n const size_t execs = 2 * eval_msb(mod);\n\n for (size_t i = 
0; i != execs; ++i)\n {\n const ui_type odd_a = a_w[0] & 1;\n\n //if(odd_a) a -= b\n ui_type underflow = bigint_cnd_sub(odd_a, a_w, b_w, mod_words);\n\n //if(underflow) { b -= a; a = abs(a); swap(u, v); }\n bigint_cnd_add(underflow, b_w, a_w, mod_words);\n bigint_cnd_abs(underflow, a_w, mod_words);\n bigint_cnd_swap(underflow, u_w, v_w, mod_words);\n\n // a >>= 1\n bigint_shr1(a_w, mod_words, 0, 1);\n\n //if(odd_a) u -= v;\n ui_type borrow = bigint_cnd_sub(odd_a, u_w, v_w, mod_words);\n\n // if(borrow) u += p\n bigint_cnd_add(borrow, u_w, mod.data(), mod_words);\n\n const ui_type odd_u = u_w[0] & 1;\n\n // u >>= 1\n bigint_shr1(u_w, mod_words, 0, 1);\n\n //if(odd_u) u += mp1o2;\n bigint_cnd_add(odd_u, u_w, mp1o2, mod_words);\n }\n\n auto a_is_0 = CT::Mask::set();\n for (size_t i = 0; i != mod_words; ++i)\n a_is_0 &= CT::Mask::is_zero(a_w[i]);\n\n auto b_is_1 = CT::Mask::is_equal(b_w[0], 1);\n for (size_t i = 1; i != mod_words; ++i)\n b_is_1 &= CT::Mask::is_zero(b_w[i]);\n\n BOOST_ASSERT_MSG(a_is_0.is_set(), \"A is zero\");\n\n // if b != 1 then gcd(n,mod) > 1 and inverse does not exist\n // in which case zero out the result to indicate this\n (~b_is_1).if_set_zero_out(v_w, mod_words);*/\n\n /*\n * We've placed the result in the lowest words of the temp buffer.\n * So just clear out the other values and then give that buffer to a\n * BigInt.\n */\n /*\n clear_mem(&tmp_mem[mod_words], 4 * mod_words);\n\n CT::unpoison(tmp_mem.data(), tmp_mem.size());\n\n Backend r;\n r.swap_reg(tmp_mem);\n return r;\n }*/\n\n /*\n template \n void inverse_mod_odd_modulus(number& res,\n const number& n,\n const number& mod)\n {\n eval_inverse_mod_odd_modulus(res.backend(), n.backend(), mod.backend());\n }\n */\n\n /*\n template \n std::size_t eval_almost_montgomery_inverse(Backend& result, const Backend& a,\n const Backend& p)\n {\n size_t k = 0;\n\n Backend u = p, v = a, r = 0, s = 1;\n\n while (eval_gt(v, 0))\n {\n if (eval_integer_modulus(u, 2) == 0)\n {\n eval_right_shift(u, 1);\n 
eval_left_shift(s, 1);\n }\n else if (eval_integer_modulus(v, 2) == 0)\n {\n eval_right_shift(v, 1);\n eval_left_shift(r, 1);\n }\n else if (eval_gt(u, v))\n {\n eval_subtract(u, v);\n eval_right_shift(u, 1);\n eval_add(r, s);\n eval_left_shift(s, 1);\n }\n else\n {\n eval_subtract(v, u);\n eval_right_shift(v, 1);\n eval_add(s, r);\n eval_left_shift(r, 1);\n }\n\n k++;\n }\n\n if (!eval_gt(p, r))\n {\n eval_subtract(r, p);\n }\n\n result = p;\n\n eval_subtract(result, r);\n\n return k;\n }\n */\n\n /*\n template \n std::size_t almost_montgomery_inverse(number& result,\n const number& a,\n const number& p)\n {\n return eval_almost_montgomery_inverse(result.backend(), a.backend(), p.backend());\n }\n */\n\n /*\n template \n Backend eval_normalized_montgomery_inverse(const Backend& a, const Backend& p)\n {\n Backend r;\n std::size_t k = eval_almost_montgomery_inverse(r, a, p);\n\n for (std::size_t i = 0; i != k; ++i)\n {\n if (eval_integer_modulus(p, 2) == 1)\n {\n eval_add(r, p);\n }\n eval_right_shift(r, 1);\n }\n\n return r;\n }\n */\n\n /*\n template \n number normalized_montgomery_inverse(\n const number& a,\n const number& p)\n {\n return number(\n evaL_normalized_montgomery_inverse(a.backned(), p.backend()));\n }\n */\n\n /*\n template \n Backend eval_inverse_mod_pow2(Backend& a1, size_t k)\n {\n typedef typename boost::mpl::front::type ui_type;*/\n /*\n * From \"A New Algorithm for Inversion mod p^k\" by \u00c7etin Kaya Ko\u00e7\n * https://eprint.iacr.org/2017/411.pdf sections 5 and 7.\n */\n /*\n if (eval_integer_modulus(a1, 2) == 0)\n return 0;\n\n Backend a = a1;\n eval_bit_set(a, k);\n\n Backend b = 1, X = 0, newb;\n\n const std::size_t a_words = a.sig_words();\n\n X.grow_to(round_up(k, sizeof(ui_type) * CHAR_BIT) / sizeof(ui_type) * CHAR_BIT);\n b.grow_to(a_words);\n */\n /*\n Hide the exact value of k. 
k is anyway known to word length\n granularity because of the length of a, so no point in doing more\n than this.\n */\n /*\n\n const std::size_t iter = round_up(k, sizeof(ui_type) * CHAR_BIT);\n\n for (std::size_t i = 0; i != iter; ++i)\n {\n const bool b0 = eval_bit_test(b, 0);\n X.conditionally_set_bit(i, b0);\n newb = b;\n eval_subtract(newb, a);\n b.ct_cond_assign(b0, newb);\n eval_right_shift(b, 1);\n }\n eval_bit_set(X, k);\n X.const_time_unpoison();\n return X;\n }\n */\n\n /*\n template \n number inverse_mod_pow2(\n const number& a1, size_t k)\n {\n return number(\n eval_inverse_mod_pow2(a1.backend(), k.backend()));\n }\n */\n\n /*\n template \n Backend eval_inverse_mod(Backend& res, const Backend& n, const Backend& mod)\n {\n if (eval_is_zero(mod))\n {\n BOOST_THROW_EXCEPTION(\n std::invalid_argument(\"eval_inverse_mod: mod must be non zero\"));\n }\n if ((eval_get_sign(mod) < 0) || (eval_get_sign(n) < 0))\n {\n BOOST_THROW_EXCEPTION(\n std::invalid_argument(\"eval_inverse_mod: arguments must be non-negative\"));\n }\n if (eval_is_zero(n) || (eval_integer_modulus(n, 2) == 0 && eval_integer_modulus(mod,\n 2) == 0))\n {\n return 0;\n }\n if (eval_integer_modulus(n, 2) == 1)\n {*/\n /*\n Fastpath for common case. 
This leaks information if n > mod\n but we don't guarantee const time behavior in that case.\n */\n /*\n if (eval_gt(mod, n))\n return eval_inverse_mod_odd_modulus(n, mod);\n else\n return eval_inverse_mod_odd_modulus(ct_modulo(n, mod), mod);\n }\n\n const std::size_t mod_lz = eval_lsb(mod);\n BOOST_ASSERT(mod_lz > 0);\n const std::size_t mod_bits = eval_msb(mod);\n BOOST_ASSERT(mod_bits > mod_lz);\n\n if (mod_lz == mod_bits - 1)\n {\n // In this case we are performing an inversion modulo 2^k\n return eval_inverse_mod_pow2(n, mod_lz);\n }*/\n\n /*\n * In this case we are performing an inversion modulo 2^k*o for\n * some k > 1 and some odd (not necessarily prime) integer.\n * Compute the inversions modulo 2^k and modulo o, then combine them\n * using CRT, which is possible because 2^k and o are relatively prime.\n */\n /*\n Backend o = mod;\n\n eval_right_shift(mod, mod_lz);\n\n Backend n_redc = ct_modulo(n, o);\n Backend inv_o = eval_inverse_mod_odd_modulus(n_redc, o);\n Backend inv_2k = eval_inverse_mod_pow2(n, mod_lz);\n\n // No modular inverse in this case:\n if (eval_is_zero(o) || eval_is_zero(inv_2k))\n return 0;\n\n Backend m2k = mod_lz;\n eval_multiply(m2k, m2k);\n // Compute the CRT parameter\n Backend c = inverse_mod_pow2(o, mod_lz);\n\n // Compute h = c*(inv_2k-inv_o) mod 2^k\n Backend h = inv_2k;\n\n eval_subtract(h, inv_o);\n eval_multiply(h, c);\n\n const bool h_neg = (eval_get_sign(h) < 0);\n\n eval_abs(h); // h.set_sign(BigInt::Positive);\n eval_bit_set(h, mod_lz);\n\n const bool h_nonzero = !eval_is_zero(h);\n\n eval_subtracr(m2k, h);\n h.ct_cond_assign(h_nonzero && h_neg, m2k);\n\n // Return result inv_o + h * o\n eval_multiply(h, o);\n eval_add(h, inv_o);\n\n return h;\n }\n */\n } // namespace backends\n } // namespace multiprecision\n } // namespace crypto3\n} // namespace nil\n\n#endif\n", "meta": {"hexsha": "7fd6918012764c416f82db47e0e413260ef343aa", "size": 35032, "ext": "hpp", "lang": "C++", "max_stars_repo_path": 
"libs/multiprecision/include/nil/crypto3/multiprecision/modular/inverse.hpp", "max_stars_repo_name": "Curryrasul/knapsack-snark", "max_stars_repo_head_hexsha": "633515a13906407338a81b9874d964869ddec624", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-09-14T18:09:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-14T18:09:38.000Z", "max_issues_repo_path": "libs/multiprecision/include/nil/crypto3/multiprecision/modular/inverse.hpp", "max_issues_repo_name": "Curryrasul/knapsack-snark", "max_issues_repo_head_hexsha": "633515a13906407338a81b9874d964869ddec624", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/multiprecision/include/nil/crypto3/multiprecision/modular/inverse.hpp", "max_forks_repo_name": "Curryrasul/knapsack-snark", "max_forks_repo_head_hexsha": "633515a13906407338a81b9874d964869ddec624", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-01-12T10:53:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T10:53:21.000Z", "avg_line_length": 50.7710144928, "max_line_length": 120, "alphanum_fraction": 0.3640956839, "num_tokens": 5820, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8519528019683105, "lm_q2_score": 0.5851011542032312, "lm_q1q2_score": 0.49847856775833527}} {"text": "/* Greg Anderson\n *\n * Wrapper classes for Apron abstractions.\n */\n\n#ifndef _ABSTRACT_H_\n#define _ABSTRACT_H_\n\n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nclass ArithExpr {\n private:\n ap_texpr0_t* expr;\n\n public:\n ArithExpr();\n ArithExpr(double constant);\n ArithExpr(int ind);\n ArithExpr(double lower, double upper);\n ArithExpr(ap_texpr0_t* expr);\n ArithExpr(const ArithExpr& other);\n ArithExpr(ArithExpr&& other);\n ArithExpr(const std::string& str);\n ~ArithExpr();\n ArithExpr& operator=(const ArithExpr& other);\n ArithExpr& operator=(ArithExpr&& other);\n ArithExpr negate() const;\n ArithExpr operator+(const ArithExpr& other) const;\n ArithExpr operator-(const ArithExpr& other) const;\n ArithExpr operator*(const ArithExpr& other) const;\n ArithExpr operator/(const ArithExpr& other) const;\n // Be careful about the precedence of ^. C++ uses this operator for XOR so\n // it has much lower precedence than you would expect for a power operator.\n ArithExpr operator^(int power) const;\n\n inline ap_texpr0_t* get_texpr() const {\n return expr;\n }\n};\n\n/**\n * A set of linear constraints. A point x satisfies these constraints if\n * `weights * x <= biases`.\n */\nclass LinCons {\n public:\n Eigen::MatrixXd weights;\n Eigen::VectorXd biases;\n LinCons();\n LinCons(const Eigen::MatrixXd& ws, const Eigen::VectorXd& bs);\n double distance_from(const Eigen::VectorXd& x) const;\n};\n\nenum class AbstractDomain { ZONOTOPE, INTERVAL, POLYHEDRA };\n\n/**\n * An abstract value over some underlying Apron value. 
This is just a\n * convenient wrapper class around Apron values.\n */\nclass AbstractVal {\n protected:\n ap_manager_t* man;\n ap_abstract0_t* value;\n virtual std::unique_ptr make_new(ap_abstract0_t* a) const;\n AbstractDomain domain;\n\n public:\n AbstractVal();\n AbstractVal(ap_manager_t* man, ap_abstract0_t* v);\n // Construct a new abstract value in the given domain subject to the\n // set of linear constraints a x <= b\n AbstractVal(AbstractDomain dom, const std::vector& a,\n const std::vector& b);\n AbstractVal(AbstractDomain dom, const LinCons& lc);\n\n // Construct a new abstract value in the given domain from the given\n // interval\n AbstractVal(AbstractDomain dom, const Eigen::VectorXd& lowers,\n const Eigen::VectorXd& uppers);\n AbstractVal(const AbstractVal& other);\n AbstractVal(AbstractVal&& other);\n virtual ~AbstractVal();\n AbstractVal& operator=(const AbstractVal& other) = delete;\n AbstractVal& operator=(AbstractVal&& other) = delete;\n\n inline ap_manager_t* get_manager() const {\n return man;\n }\n\n inline ap_abstract0_t* get_value() const {\n return value;\n }\n\n inline AbstractDomain get_domain() const {\n return domain;\n }\n\n std::unique_ptr add_trailing_dimensions(int n) const;\n std::unique_ptr add_leading_dimensions(int n) const;\n std::unique_ptr remove_trailing_dimensions(int n) const;\n\n /**\n * Meet this value with the linear constraints a x <= b.\n */\n virtual std::unique_ptr meet_linear_constraint(\n const Eigen::MatrixXd& a,\n const Eigen::VectorXd& b) const;\n\n /**\n * Perform a specific affine transformation.\n */\n virtual std::unique_ptr scalar_affine(\n const Eigen::MatrixXd& w,\n const Eigen::VectorXd& b) const;\n\n /**\n * Perform an abstract transformation where each coefficient is an interval.\n */\n virtual std::unique_ptr interval_affine(\n const Eigen::MatrixXd& wl,\n const Eigen::MatrixXd& wu,\n const Eigen::VectorXd& bl,\n const Eigen::VectorXd& bu) const;\n\n /**\n * A relu is computed as follows: for each 
dimension i, compute\n * x_l = meet(x, x_i < 0) and x_u = meet(x, x_i >= 0). Compute\n * x'_l = x_l[x_i <- 0]. Let x = join(x'_l, x_u).\n */\n virtual std::unique_ptr relu() const;\n\n virtual std::unique_ptr join(const AbstractVal& other) const;\n\n virtual std::unique_ptr meet(const AbstractVal& other) const;\n\n virtual std::unique_ptr widen(const AbstractVal& other) const;\n\n virtual bool operator==(const AbstractVal& other) const;\n\n /**\n * Create an abstract value by adding each dimension of b to this and\n * maintain the relations among variables in b.\n */\n virtual std::unique_ptr append(const AbstractVal& b) const;\n\n virtual std::unique_ptr arith_computation(\n const std::vector& exprs) const;\n\n inline bool is_bottom() const {\n return ap_abstract0_is_bottom(man, value);\n }\n\n inline bool is_top() const {\n return ap_abstract0_is_top(man, value);\n }\n\n bool contains_point(const Eigen::VectorXd& x) const;\n bool contains(const AbstractVal& x) const;\n Eigen::VectorXd get_center() const;\n virtual Eigen::VectorXd get_contained_point() const;\n\n /**\n * Get the number of dimensions of this abstract value.\n */\n inline size_t dims() const {\n return ap_abstract0_dimension(man, value).realdim;\n }\n\n virtual std::unique_ptr clone() const;\n\n virtual std::unique_ptr bottom() const;\n\n LinCons get_lincons() const;\n\n //virtual double distance_to_point(const Eigen::VectorXd& x) const;\n void print(FILE* out) const;\n};\n\n/**\n * Powerset is used for a bounded powerset domain. 
It is based on Aprons\n * disjunctive domain, but applies a consolidation step after each join or\n * merge.\n */\nclass Powerset: public AbstractVal {\n private:\n size_t size;\n\n protected:\n std::unique_ptr make_new(ap_abstract0_t* a) const override;\n\n public:\n Powerset(ap_manager_t* m, ap_abstract0_t* v, size_t s);\n Powerset(const Powerset& p);\n Powerset(AbstractDomain dom, size_t size,\n const std::vector& a,\n const std::vector& b);\n Powerset(AbstractDomain dom, size_t size, const LinCons& lc);\n Powerset(AbstractDomain dom, size_t size,\n const Eigen::VectorXd& lowers,\n const Eigen::VectorXd& uppers);\n Powerset& operator=(const Powerset& other);\n std::unique_ptr join(const AbstractVal& other) const override;\n std::unique_ptr meet(const AbstractVal& other) const override;\n std::unique_ptr arith_computation(\n const std::vector& exprs) const override;\n Eigen::VectorXd get_contained_point() const;\n std::unique_ptr clone() const override;\n std::unique_ptr bottom() const override;\n //double distance_to_point(const Eigen::VectorXd& x) const;\n};\n\n//class Managers {\n// private:\n// ap_manager_t* t1p_man;\n// ap_manager_t* box_man;\n// std::map disj_mans;\n//\n// public:\n// Managers(ap_manager_t*, ap_manager_t*);\n// ~Managers();\n// inline ap_manager_t* get_t1p_manager() const {\n// return t1p_man;\n// }\n// inline ap_manager_t* get_box_manager() const {\n// return box_man;\n// }\n// inline ap_manager_t* get_disj_manager(ap_manager_t* base) const {\n// return disj_mans.at(base);\n// }\n//};\n\n#endif\n", "meta": {"hexsha": "9e537a941e7343af29b6b671d375d2da27314d83", "size": 7511, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "abstract.hpp", "max_stars_repo_name": "gavlegoat/safe-learning", "max_stars_repo_head_hexsha": "614ad97834f1a96e6c9d7c8e6677277d9af4d816", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2020-10-27T14:52:45.000Z", "max_stars_repo_stars_event_max_datetime": 
"2021-12-01T16:33:03.000Z", "max_issues_repo_path": "abstract.hpp", "max_issues_repo_name": "gavlegoat/safe-learning", "max_issues_repo_head_hexsha": "614ad97834f1a96e6c9d7c8e6677277d9af4d816", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6.0, "max_issues_repo_issues_event_min_datetime": "2020-11-13T19:09:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T02:23:23.000Z", "max_forks_repo_path": "abstract.hpp", "max_forks_repo_name": "gavlegoat/safe-learning", "max_forks_repo_head_hexsha": "614ad97834f1a96e6c9d7c8e6677277d9af4d816", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2020-12-03T13:52:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-08T01:36:17.000Z", "avg_line_length": 30.6571428571, "max_line_length": 80, "alphanum_fraction": 0.6860604447, "num_tokens": 1873, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7490872243177518, "lm_q2_score": 0.665410558746814, "lm_q1q2_score": 0.49845054848337517}} {"text": "#include \n\n#include \"mfem.hpp\"\nusing namespace mfem;\n\n#include \n#include \n\n#include \"../include/core/config.hpp\"\n#include \"../include/mymfem/utilities.hpp\"\n#include \"../include/uq/stats/pcells_hash_table.hpp\"\n\n\n//! Test creating a parallel cells hash table for a given mesh\n//! 
Unit Square Mesh, coarse\nTEST(ParCellsHashTable, test1)\n{\n int nprocs, myrank;\n MPI_Comm global_comm = MPI_COMM_WORLD;\n MPI_Comm_size(global_comm, &nprocs);\n MPI_Comm_rank(global_comm, &myrank);\n\n int lx = 2;\n const std::string mesh_file\n = \"../input/poisson_smooth_unitSquare/mesh_lx\"\n +std::to_string(lx);\n\n int Nx = 4;\n double xl = 0;\n double xr = 1;\n\n int Ny = 4;\n double yl = 0;\n double yr = 1;\n\n //std::cout << mesh_file << std::endl;\n std::shared_ptr mesh\n = std::make_shared(mesh_file.c_str());\n\n auto cellsHashTable\n = make_hashTable(global_comm,\n mesh, Nx, xl, xr, Ny, yl, yr);\n cellsHashTable->display();\n\n int numElements = cellsHashTable->get_numElements();\n int true_numElements = mesh->GetNE();\n ASSERT_EQ(numElements, true_numElements);\n\n Eigen::Vector2d coords;\n coords(0) = 0.51;\n coords(1) = 0.50;\n int true_idx = 2;\n int true_idy = 1;\n int idx, idy;\n std::tie(idx, idy) = cellsHashTable->search(coords);\n if (myrank == 0) {\n std::cout << \"Coords: \"\n << coords(0) << \",\" << coords(1)\n << \" are in cell \"\n << idx << \",\" << idy << std::endl;\n }\n\n ASSERT_EQ(idx, true_idx);\n ASSERT_EQ(idy, true_idy);\n}\n\nTEST(ParCellsHashTable, test2)\n{\n int nprocs, myrank;\n MPI_Comm global_comm = MPI_COMM_WORLD;\n MPI_Comm_size(global_comm, &nprocs);\n MPI_Comm_rank(global_comm, &myrank);\n\n const std::string mesh_file\n = \"../meshes/channel_L1pt5/refined\"\n \"/tri_mesh_l0.msh\";\n\n int Nx = 10;\n double xl = 0;\n double xr = 1.5;\n\n int Ny = 10;\n double yl = 0;\n double yr = 0.5;\n\n //std::cout << mesh_file << std::endl;\n std::shared_ptr mesh\n = std::make_shared(mesh_file.c_str());\n\n auto cellsHashTable\n = make_hashTable(global_comm,\n mesh, Nx, xl, xr, Ny, yl, yr);\n\n int numElements = cellsHashTable->get_numElements();\n int true_numElements = mesh->GetNE();\n ASSERT_EQ(numElements, true_numElements);\n}\n\n// End of file\n", "meta": {"hexsha": "3afab716a1be07708d4138d7cc4c12cbce0d9777", "size": 2503, 
"ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/test_phashTable.cpp", "max_stars_repo_name": "pratyuksh/NumHypSys", "max_stars_repo_head_hexsha": "29e03f9cc0572178701525210561b152d89999d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_phashTable.cpp", "max_issues_repo_name": "pratyuksh/NumHypSys", "max_issues_repo_head_hexsha": "29e03f9cc0572178701525210561b152d89999d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_phashTable.cpp", "max_forks_repo_name": "pratyuksh/NumHypSys", "max_forks_repo_head_hexsha": "29e03f9cc0572178701525210561b152d89999d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.03, "max_line_length": 62, "alphanum_fraction": 0.5920894926, "num_tokens": 685, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.665410558746814, "lm_q2_score": 0.7490872187162397, "lm_q1q2_score": 0.4984505447560699}} {"text": "/**\n * @file\n * @brief NPDE homework CoupledSecondOrderBVP\n * @author Erick Schulz\n * @date 13/11/2019\n * @copyright Developed at ETH Zurich\n */\n\n#include \n#include \n#include \n#include \n\n#include \n\n#include \n#include \n\n#include \"coupledsecondorderbvp.h\"\n\nusing namespace CoupledSecondOrderBVP;\n\nint main(int /*argc*/, const char ** /*argv*/) {\n // Load mesh into a Lehrfem++ object\n auto mesh_factory = std::make_unique(2);\n const lf::io::GmshReader reader(std::move(mesh_factory),\n CURRENT_SOURCE_DIR \"/../meshes/hex1.msh\");\n auto mesh_p = reader.mesh(); // type shared_ptr< const lf::mesh::Mesh>\n\n // Load finite element space\n // We discretization by means of piecewise QUADRATIC lagrangian FE\n auto fe_space = std::make_shared>(mesh_p);\n // Obtain local->global index mapping for current finite element space\n const lf::assemble::DofHandler &dofh{fe_space->LocGlobMap()};\n // Dimension of finite element space\n const lf::uscalfe::size_type N_dofs(dofh.NumDofs());\n\n /* Solve the coupled boundary value problem */\n double gamma = 1.0; // reaction coefficientS\n // Right-hand side source function f\n auto f = lf::mesh::utils::MeshFunctionGlobal(\n [](Eigen::Vector2d x) -> double { return std::cos(x.norm()); });\n Eigen::VectorXd sol_vec = solveCoupledBVP(fe_space, gamma, f);\n\n /* Output results to vtk file */\n // We store data by keeping only the coefficients of nodal basis functions\n // In that sense, we are plotting the values of the solution at the vertices\n lf::io::VtkWriter vtk_writer(\n mesh_p, CURRENT_BINARY_DIR \"/CoupledSecondOrderBVP_solution.vtk\");\n // Write nodal data taking the values of the discrete solution at the vertices\n auto nodal_data = lf::mesh::utils::make_CodimMeshDataSet(mesh_p, 2);\n for (int global_idx = 0; global_idx < N_dofs; global_idx++) {\n if (dofh.Entity(global_idx).RefEl() == 
lf::base::RefElType::kPoint) {\n nodal_data->operator()(dofh.Entity(global_idx)) = sol_vec[global_idx];\n }\n };\n vtk_writer.WritePointData(\"CoupledSecondOrderBVP_solution\", *nodal_data);\n /* SAM_LISTING_END_1 */\n std::cout << \"\\n The solution vector was written to:\" << std::endl;\n std::cout << \">> CoupledSecondOrderBVP_solution.vtk\\n\" << std::endl;\n}\n", "meta": {"hexsha": "ad85ee82d380998e9bdaafa2ec9e0c82a79db257", "size": 2408, "ext": "cc", "lang": "C++", "max_stars_repo_path": "homeworks/CoupledSecondOrderBVP/templates/coupledsecondorderbvp_main.cc", "max_stars_repo_name": "padomu/NPDECODES", "max_stars_repo_head_hexsha": "d2bc5b0d2d5e76e4d5b8ab6948c82f902211182e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "homeworks/CoupledSecondOrderBVP/templates/coupledsecondorderbvp_main.cc", "max_issues_repo_name": "padomu/NPDECODES", "max_issues_repo_head_hexsha": "d2bc5b0d2d5e76e4d5b8ab6948c82f902211182e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "homeworks/CoupledSecondOrderBVP/templates/coupledsecondorderbvp_main.cc", "max_forks_repo_name": "padomu/NPDECODES", "max_forks_repo_head_hexsha": "d2bc5b0d2d5e76e4d5b8ab6948c82f902211182e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8387096774, "max_line_length": 80, "alphanum_fraction": 0.7034883721, "num_tokens": 663, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.665410558746814, "lm_q2_score": 0.7490872131147275, "lm_q1q2_score": 0.4984505410287645}} {"text": "// Boost.Geometry (aka GGL, Generic Geometry Library)\n// Unit test\n\n// Copyright (c) 2015, Oracle and/or its affiliates.\n\n// Licensed under the Boost Software License version 1.0.\n// http://www.boost.org/users/license.html\n\n// Contributed and/or modified by Menelaos Karavelas, on behalf of Oracle\n\n#ifndef BOOST_TEST_MODULE\n#define BOOST_TEST_MODULE test_equals_on_spheroid\n#endif\n\n#include \n\n#include \n\n#include \"test_equals.hpp\"\n\n#include \n\n#include \n\nnamespace bgm = bg::model;\n\ntemplate \nstruct test_point_point\n{\n static inline void apply(std::string const& header)\n {\n std::string const str = header + \"-\";\n\n test_geometry(str + \"pp_01\", \"POINT(0 0)\", \"POINT(0 0)\", true);\n test_geometry(str + \"pp_02\", \"POINT(0 0)\", \"POINT(10 0)\", false);\n\n // points whose longitudes differ by 360 degrees\n test_geometry(str + \"pp_03\", \"POINT(0 0)\", \"POINT(360 0)\", true);\n test_geometry(str + \"pp_04\", \"POINT(10 0)\", \"POINT(370 0)\", true);\n test_geometry(str + \"pp_05\", \"POINT(10 0)\", \"POINT(-350 0)\", true);\n test_geometry(str + \"pp_06\", \"POINT(180 10)\", \"POINT(-180 10)\", true);\n test_geometry(str + \"pp_06a\", \"POINT(540 10)\", \"POINT(-540 10)\", true);\n\n#ifdef BOOST_GEOMETRY_NORMALIZE_LATITUDE\n test_geometry(str + \"pp_06b\", \"POINT(540 370)\", \"POINT(-540 -350)\", true);\n test_geometry(str + \"pp_06c\", \"POINT(1260 370)\", \"POINT(-1260 -350)\", true);\n test_geometry(str + \"pp_06d\", \"POINT(2340 370)\", \"POINT(-2340 -350)\", true);\n#endif\n\n test_geometry(str + \"pp_06e\", \"POINT(-180 10)\", \"POINT(-540 10)\", true);\n test_geometry(str + \"pp_06f\", \"POINT(180 10)\", \"POINT(-540 10)\", true);\n\n // north & south pole\n test_geometry(str + \"pp_07\", \"POINT(0 90)\", \"POINT(0 90)\", true);\n\n#ifdef BOOST_GEOMETRY_NORMALIZE_LATITUDE\n test_geometry(str + 
\"pp_07a\", \"POINT(0 450)\", \"POINT(10 -270)\", true);\n test_geometry(str + \"pp_07b\", \"POINT(0 270)\", \"POINT(10 90)\", false);\n test_geometry(str + \"pp_07c\", \"POINT(0 -450)\", \"POINT(10 90)\", false);\n#endif\n\n test_geometry(str + \"pp_08\", \"POINT(0 90)\", \"POINT(10 90)\", true);\n test_geometry(str + \"pp_09\", \"POINT(0 90)\", \"POINT(0 -90)\", false);\n test_geometry(str + \"pp_10\", \"POINT(0 -90)\", \"POINT(0 -90)\", true);\n test_geometry(str + \"pp_11\", \"POINT(0 -90)\", \"POINT(10 -90)\", true);\n test_geometry(str + \"pp_11a\", \"POINT(0 -90)\", \"POINT(10 90)\", false);\n test_geometry(str + \"pp_12\", \"POINT(0 -90)\", \"POINT(0 -85)\", false);\n test_geometry(str + \"pp_13\", \"POINT(0 90)\", \"POINT(0 85)\", false);\n test_geometry(str + \"pp_14\", \"POINT(0 90)\", \"POINT(10 85)\", false);\n\n // symmetric wrt prime meridian\n test_geometry(str + \"pp_15\", \"POINT(-10 45)\", \"POINT(10 45)\", false);\n test_geometry(str + \"pp_16\", \"POINT(-170 45)\", \"POINT(170 45)\", false);\n\n // other points\n test_geometry(str + \"pp_17\", \"POINT(-10 45)\", \"POINT(10 -45)\", false);\n test_geometry(str + \"pp_18\", \"POINT(-10 -45)\", \"POINT(10 45)\", false);\n test_geometry(str + \"pp_19\", \"POINT(10 -135)\", \"POINT(10 45)\", false);\n\n#ifdef BOOST_GEOMETRY_NORMALIZE_LATITUDE\n test_geometry(str + \"pp_20\", \"POINT(190 135)\", \"POINT(10 45)\", true);\n test_geometry(str + \"pp_21\", \"POINT(190 150)\", \"POINT(10 30)\", true);\n test_geometry(str + \"pp_21a\", \"POINT(-170 150)\", \"POINT(10 30)\", true);\n test_geometry(str + \"pp_22\", \"POINT(190 -135)\", \"POINT(10 -45)\", true);\n test_geometry(str + \"pp_23\", \"POINT(190 -150)\", \"POINT(10 -30)\", true);\n test_geometry(str + \"pp_23a\", \"POINT(-170 -150)\", \"POINT(10 -30)\", true);\n#endif\n }\n};\n\n\ntemplate \nstruct test_point_point_with_height\n{\n static inline void apply(std::string const& header)\n {\n std::string const str = header + \"-\";\n\n test_geometry(str 
+ \"pp_01\",\n \"POINT(0 0 10)\",\n \"POINT(0 0 20)\",\n true);\n\n test_geometry(str + \"pp_02\",\n \"POINT(0 0 10)\",\n \"POINT(10 0 10)\",\n false);\n\n // points whose longitudes differ by 360 degrees\n test_geometry(str + \"pp_03\",\n \"POINT(0 0 10)\",\n \"POINT(360 0 10)\",\n true);\n\n // points whose longitudes differ by 360 degrees\n test_geometry(str + \"pp_04\",\n \"POINT(10 0 10)\",\n \"POINT(370 0 10)\",\n true);\n\n test_geometry(str + \"pp_05\",\n \"POINT(10 0 10)\",\n \"POINT(10 0 370)\",\n false);\n }\n};\n\n\ntemplate \nvoid test_segment_segment(std::string const& header)\n{\n typedef bgm::segment

seg;\n\n std::string const str = header + \"-\";\n\n test_geometry(str + \"ss_01\",\n \"SEGMENT(10 0,180 0)\",\n \"SEGMENT(10 0,-180 0)\",\n true);\n test_geometry(str + \"ss_02\",\n \"SEGMENT(0 90,180 0)\",\n \"SEGMENT(10 90,-180 0)\",\n true);\n test_geometry(str + \"ss_03\",\n \"SEGMENT(0 90,0 -90)\",\n \"SEGMENT(10 90,20 -90)\",\n true);\n test_geometry(str + \"ss_04\",\n \"SEGMENT(10 80,10 -80)\",\n \"SEGMENT(10 80,20 -80)\",\n false);\n test_geometry(str + \"ss_05\",\n \"SEGMENT(170 10,-170 10)\",\n \"SEGMENT(170 10,350 10)\",\n false);\n}\n\n\nBOOST_AUTO_TEST_CASE( equals_point_point_se )\n{\n typedef bg::cs::spherical_equatorial cs_type;\n\n test_point_point >::apply(\"se\");\n test_point_point >::apply(\"se\");\n test_point_point >::apply(\"se\");\n\n // mixed point types\n test_point_point\n <\n bgm::point, bgm::point\n >::apply(\"se\");\n\n test_point_point\n <\n bgm::point, bgm::point\n >::apply(\"se\");\n}\n\nBOOST_AUTO_TEST_CASE( equals_point_point_with_height_se )\n{\n typedef bg::cs::spherical_equatorial cs_type;\n\n test_point_point >::apply(\"seh\");\n test_point_point >::apply(\"seh\");\n test_point_point >::apply(\"seh\");\n\n // mixed point types\n test_point_point\n <\n bgm::point, bgm::point\n >::apply(\"seh\");\n\n test_point_point\n <\n bgm::point, bgm::point\n >::apply(\"seh\");\n}\n\nBOOST_AUTO_TEST_CASE( equals_point_point_geo )\n{\n typedef bg::cs::geographic cs_type;\n\n test_point_point >::apply(\"geo\");\n test_point_point >::apply(\"geo\");\n test_point_point >::apply(\"geo\");\n\n // mixed point types\n test_point_point\n <\n bgm::point, bgm::point\n >::apply(\"se\");\n\n test_point_point\n <\n bgm::point, bgm::point\n >::apply(\"se\");\n}\n\nBOOST_AUTO_TEST_CASE( equals_segment_segment_se )\n{\n typedef bg::cs::spherical_equatorial cs_type;\n\n test_segment_segment >(\"se\");\n test_segment_segment >(\"se\");\n test_segment_segment >(\"se\");\n}\n\nBOOST_AUTO_TEST_CASE( equals_segment_segment_geo )\n{\n typedef bg::cs::geographic 
cs_type;\n\n test_segment_segment >(\"geo\");\n test_segment_segment >(\"geo\");\n test_segment_segment >(\"geo\");\n}\n", "meta": {"hexsha": "282cbebfe3bcd3a37730eac3409f0d18dedb229a", "size": 8851, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/algorithms/equals/equals_on_spheroid.cpp", "max_stars_repo_name": "jkerkela/geometry", "max_stars_repo_head_hexsha": "4034ac88b214da0eab8943172eff0f1200b0a6cc", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 326.0, "max_stars_repo_stars_event_min_datetime": "2015-02-08T13:47:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T02:13:59.000Z", "max_issues_repo_path": "test/algorithms/equals/equals_on_spheroid.cpp", "max_issues_repo_name": "jkerkela/geometry", "max_issues_repo_head_hexsha": "4034ac88b214da0eab8943172eff0f1200b0a6cc", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 623.0, "max_issues_repo_issues_event_min_datetime": "2015-01-02T23:45:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T11:15:23.000Z", "max_forks_repo_path": "test/algorithms/equals/equals_on_spheroid.cpp", "max_forks_repo_name": "jkerkela/geometry", "max_forks_repo_head_hexsha": "4034ac88b214da0eab8943172eff0f1200b0a6cc", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 215.0, "max_forks_repo_forks_event_min_datetime": "2015-01-14T15:50:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T03:58:36.000Z", "avg_line_length": 37.5042372881, "max_line_length": 92, "alphanum_fraction": 0.5550785222, "num_tokens": 2648, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7490872131147275, "lm_q2_score": 0.6654105521116443, "lm_q1q2_score": 0.4984505360584438}} {"text": "#ifdef STAN_OPENCL\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nTEST(KernelGenerator, indexing_test) {\n using Eigen::MatrixXd;\n using stan::math::matrix_cl;\n\n std::string kernel_filename = \"indexing.cl\";\n MatrixXd m = MatrixXd::Random(7, 9);\n\n Eigen::MatrixXi col_idx(3, 2);\n col_idx << 2, 5, 0, 1, 6, 3;\n Eigen::MatrixXi row_idx(3, 2);\n row_idx << 1, 2, 3, 4, 2, 0;\n\n matrix_cl m_cl(m);\n matrix_cl row_idx_cl(row_idx);\n matrix_cl col_idx_cl(col_idx);\n\n auto tmp = stan::math::indexing(m_cl, row_idx_cl, col_idx_cl);\n\n matrix_cl res_cl;\n std::string kernel_src = tmp.get_kernel_source_for_evaluating_into(res_cl);\n stan::test::store_reference_kernel_if_needed(kernel_filename, kernel_src);\n std::string expected_kernel_src\n = stan::test::load_reference_kernel(kernel_filename);\n EXPECT_EQ(expected_kernel_src, kernel_src);\n\n res_cl = tmp;\n\n MatrixXd res = stan::math::from_matrix_cl(res_cl);\n\n MatrixXd correct(3, 2);\n for (int i = 0; i < 3; i++) {\n for (int j = 0; j < 2; j++) {\n correct(i, j) = m(row_idx(i, j), col_idx(i, j));\n }\n }\n EXPECT_MATRIX_EQ(res, correct);\n}\n\nTEST(KernelGenerator, indexing_multiple_operations_test) {\n using Eigen::MatrixXd;\n using Eigen::MatrixXi;\n using stan::math::matrix_cl;\n\n MatrixXd m = MatrixXd::Random(7, 9);\n\n Eigen::MatrixXi col_idx(3, 2);\n col_idx << 2, 5, 0, 1, 6, 3;\n Eigen::MatrixXi row_idx(3, 2);\n row_idx << 1, 2, 3, 4, 2, 0;\n\n Eigen::MatrixXi col_idx2(2, 2);\n col_idx2 << 0, 0, 1, 1;\n Eigen::MatrixXi row_idx2(2, 2);\n row_idx2 << 0, 1, 1, 2;\n\n matrix_cl m_cl(m);\n matrix_cl row_idx_cl(row_idx);\n matrix_cl col_idx_cl(col_idx);\n matrix_cl row_idx2_cl(row_idx2);\n matrix_cl col_idx2_cl(col_idx2);\n\n matrix_cl res_cl = stan::math::indexing(\n stan::math::indexing(m_cl, row_idx_cl, col_idx_cl),\n 
stan::math::indexing(row_idx2_cl, col_idx2_cl, col_idx2_cl), col_idx2_cl);\n\n MatrixXd res = stan::math::from_matrix_cl(res_cl);\n\n MatrixXd tmp(3, 2);\n for (int i = 0; i < 3; i++) {\n for (int j = 0; j < 2; j++) {\n tmp(i, j) = m(row_idx(i, j), col_idx(i, j));\n }\n }\n\n MatrixXd correct(2, 2);\n for (int i = 0; i < 2; i++) {\n for (int j = 0; j < 2; j++) {\n int a = col_idx2(i, j);\n correct(i, j) = tmp(row_idx2(a, a), a);\n }\n }\n EXPECT_MATRIX_EQ(res, correct);\n}\n\nTEST(KernelGenerator, indexing_lhs_test) {\n using Eigen::MatrixXd;\n using Eigen::MatrixXi;\n using stan::math::matrix_cl;\n\n MatrixXd m = MatrixXd::Zero(7, 9);\n MatrixXd m2 = MatrixXd::Random(3, 2);\n\n Eigen::MatrixXi col_idx(3, 2);\n col_idx << 2, 5, 0, 1, 6, 3;\n Eigen::MatrixXi row_idx(3, 2);\n row_idx << 1, 2, 3, 4, 2, 0;\n\n matrix_cl m_cl(m);\n matrix_cl m2_cl(m2);\n matrix_cl row_idx_cl(row_idx);\n matrix_cl col_idx_cl(col_idx);\n\n stan::math::indexing(m_cl, row_idx_cl, col_idx_cl) = m2_cl;\n\n MatrixXd res = stan::math::from_matrix_cl(m_cl);\n\n MatrixXd correct = m;\n\n for (int i = 0; i < 3; i++) {\n for (int j = 0; j < 2; j++) {\n correct(row_idx(i, j), col_idx(i, j)) = m2(i, j);\n }\n }\n EXPECT_MATRIX_EQ(res, correct);\n}\n\nTEST(KernelGenerator, indexing_repeat_lhs_rhs_test) {\n using Eigen::MatrixXd;\n using Eigen::MatrixXi;\n using stan::math::matrix_cl;\n\n MatrixXd m = MatrixXd::Zero(7, 9);\n MatrixXd correct = m;\n\n Eigen::MatrixXi col_idx(3, 2);\n col_idx << 2, 5, 0, 1, 6, 3;\n Eigen::MatrixXi row_idx(3, 2);\n row_idx << 1, 2, 3, 4, 2, 0;\n\n matrix_cl m_cl(m);\n matrix_cl row_idx_cl(row_idx);\n matrix_cl col_idx_cl(col_idx);\n\n auto b = stan::math::indexing(m_cl, row_idx_cl, col_idx_cl);\n\n b = b + 1;\n MatrixXd res = stan::math::from_matrix_cl(m_cl);\n\n for (int i = 0; i < 3; i++) {\n for (int j = 0; j < 2; j++) {\n correct(row_idx(i, j), col_idx(i, j)) += 1;\n }\n }\n EXPECT_MATRIX_EQ(res, correct);\n}\n\n#endif\n", "meta": {"hexsha": 
"7474ed16866fd8cf3041901369bb3ce10dbdd892", "size": 4126, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/unit/math/opencl/kernel_generator/indexing_test.cpp", "max_stars_repo_name": "yuzhangbit/math", "max_stars_repo_head_hexsha": "be482a212615b80319654108c9b1cf291de9a170", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-06-14T14:33:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-14T14:33:37.000Z", "max_issues_repo_path": "test/unit/math/opencl/kernel_generator/indexing_test.cpp", "max_issues_repo_name": "yuzhangbit/math", "max_issues_repo_head_hexsha": "be482a212615b80319654108c9b1cf291de9a170", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/unit/math/opencl/kernel_generator/indexing_test.cpp", "max_forks_repo_name": "yuzhangbit/math", "max_forks_repo_head_hexsha": "be482a212615b80319654108c9b1cf291de9a170", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-05-10T12:55:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-10T12:55:07.000Z", "avg_line_length": 25.9496855346, "max_line_length": 80, "alphanum_fraction": 0.6473582162, "num_tokens": 1437, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7490872131147275, "lm_q2_score": 0.6654105454764746, "lm_q1q2_score": 0.498450531088123}} {"text": "#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include // for boost::gcd\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n\nnamespace scitbx { namespace math {\nnamespace boost_python {\n\n void wrap_basic_statistics();\n void wrap_gaussian();\n void wrap_golay();\n void wrap_minimum_covering_sphere();\n void wrap_principal_axes_of_inertia();\n void wrap_row_echelon();\n void wrap_tensor_rank_2();\n void wrap_icosahedron();\n void wrap_chebyshev_base();\n void wrap_chebyshev_polynome();\n void wrap_chebyshev_fitter();\n void wrap_chebyshev_lsq();\n void wrap_slatec();\n void wrap_line_search();\n void wrap_r3_rotation();\n void wrap_resample();\n void wrap_quadrature();\n void wrap_unimodular_generator();\n void wrap_halton();\n void wrap_least_squares_plane();\n void wrap_continued_fraction();\n void wrap_numeric_limits();\n void wrap_distributions();\n void wrap_exp_functions();\n void wrap_zernike();\n void wrap_zernike_mom();\n void wrap_2d_zernike_mom();\n void wrap_weighted_covariance();\n void wrap_dmatrix();\n void wrap_correlation();\n void wrap_interpolation();\n void wrap_tetrahedron();\n void wrap_angle_derivative();\n\nnamespace {\n\n int\n time_gcd_int_boost(\n int n)\n {\n int result = 0;\n for(int a=0;a\n boost::optional\n dihedral_angle(\n SitesType const& sites,\n bool deg)\n {\n return dihedral(sites).angle(deg);\n }\n\n mat3\n superpose_kearsley_rotation(\n af::const_ref > const& reference_sites,\n af::const_ref > const& other_sites)\n {\n return superpose::superposition<>::kearsley_rotation(\n reference_sites, other_sites);\n }\n\n mat3< double >\n euler_angles_xyz_matrix(\n const double& ax,\n const double& ay,\n const 
double& az )\n {\n return euler_angles::xyz_matrix( ax, ay, az );\n }\n\n vec3< double >\n euler_angles_xyz_angles(\n const mat3< double >& m,\n const double& eps = 1e-12 )\n {\n return euler_angles::xyz_angles( m, eps );\n }\n\n mat3< double >\n euler_angles_yzx_matrix(\n const double& ay,\n const double& az,\n const double& ax )\n {\n return euler_angles::yzx_matrix( ay, az, ax );\n }\n\n vec3< double >\n euler_angles_yzx_angles(\n const mat3< double >& m,\n const double& eps = 1e-12 )\n {\n return euler_angles::yzx_angles( m, eps );\n }\n\n mat3< double >\n euler_angles_zyz_matrix(\n const double& az1,\n const double& ay,\n const double& az3 )\n {\n return euler_angles::zyz_matrix( az1, ay, az3 );\n }\n\n vec3< double >\n euler_angles_zyz_angles(\n const mat3< double >& m,\n const double& eps = 1e-12 )\n {\n return euler_angles::zyz_angles( m, eps );\n }\n\n template \n struct approx_equal_relatively_wrapper\n {\n typedef math::approx_equal_relatively wt;\n typedef T arg_t;\n typedef typename wt::amplitude_type amplitude_t;\n static bool form_1(arg_t x, arg_t y, amplitude_t relative_error) {\n wt p(relative_error);\n return p(x, y);\n }\n static bool form_2(arg_t x, arg_t y, amplitude_t relative_error,\n amplitude_t near_zero_threshold)\n {\n wt p(relative_error, near_zero_threshold);\n return p(x, y);\n }\n\n static void wrap() {\n using namespace boost::python;\n def(\"approx_equal_relatively\", form_1,\n (arg(\"x\"), arg(\"y\"), arg(\"relative_error\")));\n def(\"approx_equal_relatively\", form_2,\n (arg(\"x\"), arg(\"y\"), arg(\"relative_error\"),\n arg(\"near_zero_threshold\")));\n }\n };\n\n void init_module()\n {\n using namespace boost::python;\n\n def(\"time_gcd_int_boost\", time_gcd_int_boost);\n def(\"time_gcd_long_boost\", time_gcd_long_boost);\n def(\"gcd_int_simple\", gcd_int_simple, (arg(\"a\"), arg(\"b\")));\n def(\"time_gcd_int_simple\", time_gcd_int_simple);\n def(\"gcd_long_simple\", gcd_long_simple, (arg(\"a\"), arg(\"b\")));\n 
def(\"time_gcd_long_simple\", time_gcd_long_simple);\n def(\"time_gcd_unsigned_long_binary\", time_gcd_unsigned_long_binary);\n def(\"gcd_long_binary\", gcd_long_binary, (arg(\"a\"), arg(\"b\")));\n def(\"time_gcd_long_binary\", time_gcd_long_binary);\n#if defined(SCITBX_MATH_GCD_USING_ASM)\n def(\"gcd_int32_asm\", gcd_int32_asm, (arg(\"a\"), arg(\"b\")));\n def(\"time_gcd_int32_asm\", time_gcd_int32_asm);\n# if defined(__x86_64__)\n def(\"gcd_int64_asm\", gcd_int64_asm, (arg(\"a\"), arg(\"b\")));\n def(\"time_gcd_int64_asm\", time_gcd_int64_asm);\n# endif\n#endif\n\n def(\"floating_point_epsilon_float_get\",\n &floating_point_epsilon::get);\n def(\"floating_point_epsilon_double_get\",\n &floating_point_epsilon::get);\n\n def(\"erf\", (double(*)(double const&)) erf);\n def(\"erf\",\n (scitbx::af::shared(*)(\n scitbx::af::const_ref const&)) erf);\n def(\"erfc\", (double(*)(double const&)) erfc);\n def(\"erfcx\", (double(*)(double const&)) erfcx);\n\n def(\"parabolic_cylinder_d\", (double(*)(double, double))\n parabolic_cylinder_d::dv);\n\n // G-function\n def(\"GfuncOfRSsqr_approx\", (double(*)(double))\n g_function::GfuncOfRSsqr_approx);\n\n def(\"bessel_i1_over_i0\", (double(*)(double const&)) bessel::i1_over_i0);\n def(\"bessel_i1_over_i0\",\n (scitbx::af::shared(*)(scitbx::af::const_ref const&))\n bessel::i1_over_i0);\n def(\"bessel_inverse_i1_over_i0\",\n (double(*)(double const&)) bessel::inverse_i1_over_i0);\n def(\"inverse_bessel_i1_over_i0\", (scitbx::af::shared(*)(\n scitbx::af::const_ref const&)) bessel::inverse_i1_over_i0);\n def(\"bessel_i0\", (double(*)(double const&)) bessel::i0);\n def(\"bessel_i1\", (double(*)(double const&)) bessel::i1);\n def(\"bessel_ln_of_i0\", (double(*)(double const&)) bessel::ln_of_i0);\n def(\"ei1\", (double(*)(double const&)) bessel::ei1);\n def(\"ei0\", (double(*)(double const&)) bessel::ei0);\n\n typedef return_value_policy rbv;\n namespace smg=scitbx::math::gaussian_fit_1d_analytical;\n class_ 
>(\"gaussian_fit_1d_analytical\")\n .def(init<\n af::const_ref const&,\n af::const_ref const&,\n af::const_ref const& >(\n (arg(\"x\"),\n arg(\"y\"),\n arg(\"z\"))))\n .def(init<\n af::const_ref const&,\n af::const_ref const& >(\n (arg(\"x\"),\n arg(\"y\"))))\n .add_property(\"a\", make_getter(&smg::compute<>::a, rbv()))\n .add_property(\"b\", make_getter(&smg::compute<>::b, rbv()))\n ;\n\n //typedef return_value_policy rbv;\n namespace cueq=scitbx::math::cubic_equation;\n class_ >(\"cubic_equation_real\")\n .def(init<\n double const&,\n double const&,\n double const&,\n double const& >(\n (arg(\"a\"),\n arg(\"b\"),\n arg(\"c\"),\n arg(\"d\"))))\n .def(\"residual\", &cueq::real<>::residual)\n .add_property(\"x\", make_getter(&cueq::real<>::x, rbv()))\n .add_property(\"A\", make_getter(&cueq::real<>::A, rbv()))\n .add_property(\"B\", make_getter(&cueq::real<>::B, rbv()))\n .add_property(\"D\", make_getter(&cueq::real<>::D, rbv()))\n ;\n\n#if defined(SCITBX_MATH_BESSEL_HAS_SPHERICAL)\n def(\"spherical_bessel\",\n (double(*)(int const&, double const&))\n bessel::spherical_bessel);\n def(\"spherical_bessel_array\",\n (scitbx::af::shared< double> (*)(\n int const&, scitbx::af::shared const&))\n bessel::spherical_bessel_array);\n def(\"bessel_J\",\n (double(*)(int const&, double const&))\n bessel::bessel_J);\n def(\"bessel_J_array\",\n (scitbx::af::shared< double> (*)(\n int const&, scitbx::af::shared const&))\n bessel::bessel_J_array);\n def (\"bessel_J_zeroes\",\n (scitbx::af::shared< double> (*)(\n double const&, int const&))\n bessel::bessel_J_zeroes);\n def (\"sph_bessel_j_zeroes\",\n (scitbx::af::shared< double> (*)(\n double const&, int const&))\n bessel::sph_bessel_j_zeroes);\n\n\n#endif\n\n def(\"gamma_complete\", (double(*)(double const&, bool))\n gamma::complete, (\n arg(\"x\"),\n arg(\"minimax\")=true));\n def(\"gamma_incomplete\", (double(*)(double const&,\n double const&,\n unsigned))\n gamma::incomplete, (arg(\"a\"),\n arg(\"x\"),\n 
arg(\"max_iterations\")=500));\n def(\"gamma_incomplete_complement\",(double(*)(double const&,\n double const&,\n unsigned))\n gamma::incomplete_complement, (\n arg(\"a\"),\n arg(\"x\"),\n arg(\"max_iterations\")=500));\n def(\"exponential_integral_e1z\", (double(*)(double const&))\n gamma::exponential_integral_e1z );\n\n def(\"lambertw\", (double(*)(double const&, unsigned))\n lambertw, (\n arg(\"x\"),\n arg(\"max_iterations\")=100));\n\n wrap_basic_statistics();\n wrap_gaussian();\n wrap_golay();\n wrap_minimum_covering_sphere();\n wrap_principal_axes_of_inertia();\n wrap_row_echelon();\n wrap_tensor_rank_2();\n wrap_icosahedron();\n wrap_chebyshev_base();\n wrap_chebyshev_polynome();\n wrap_chebyshev_fitter();\n wrap_chebyshev_lsq();\n wrap_slatec();\n wrap_line_search();\n wrap_r3_rotation();\n wrap_resample();\n wrap_quadrature();\n wrap_unimodular_generator();\n wrap_halton();\n wrap_least_squares_plane();\n wrap_continued_fraction();\n wrap_numeric_limits();\n wrap_distributions();\n wrap_exp_functions();\n wrap_zernike();\n wrap_zernike_mom();\n wrap_2d_zernike_mom();\n wrap_weighted_covariance();\n wrap_dmatrix();\n wrap_correlation();\n wrap_interpolation();\n wrap_tetrahedron();\n wrap_angle_derivative();\n\n def(\"superpose_kearsley_rotation\", superpose_kearsley_rotation, (\n arg(\"reference_sites\"), arg(\"other_sites\")));\n\n def(\"dihedral_angle\",\n dihedral_angle, 4> >, (\n arg(\"sites\"), arg(\"deg\")=false));\n def(\"dihedral_angle\",\n dihedral_angle > >, (\n arg(\"sites\"), arg(\"deg\")=false));\n\n def( \"euler_angles_xyz_matrix\", euler_angles_xyz_matrix, (\n arg(\"ax\"), arg(\"ay\"), arg(\"az\")));\n def( \"euler_angles_xyz_angles\", euler_angles_xyz_angles, (\n arg(\"m\"), arg(\"eps\")=1e-12));\n\n def( \"euler_angles_yzx_matrix\", euler_angles_yzx_matrix, (\n arg(\"ay\"), arg(\"az\"), arg(\"ax\")));\n def( \"euler_angles_yzx_angles\", euler_angles_yzx_angles, (\n arg(\"m\"), arg(\"eps\")=1e-12));\n\n def( \"euler_angles_zyz_matrix\", 
euler_angles_zyz_matrix, (\n arg(\"az1\"), arg(\"ay\"), arg(\"az3\")));\n def( \"euler_angles_zyz_angles\", euler_angles_zyz_angles, (\n arg(\"m\"), arg(\"eps\")=1e-12));\n\n def(\"approx_sqrt\", math::approx_sqrt);\n\n def(\"cos_table\",\n (double(*)(\n af::const_ref const&,\n double,\n double const& ,\n int const& ,\n bool))\n math::cos_table, (\n arg(\"table\"), arg(\"arg\"), arg(\"step\"), arg(\"n\"),\n arg(\"interpolate\")));\n\n def(\"sin_table\",\n (double(*)(\n af::const_ref const&,\n double,\n double const& ,\n int const& ,\n bool))\n math::sin_table, (\n arg(\"table\"), arg(\"arg\"), arg(\"step\"), arg(\"n\"),\n arg(\"interpolate\")));\n\n def(\"signed_phase_error\",\n (double(*)(\n double const&, double const&, bool))\n math::signed_phase_error, (\n arg(\"phi1\"), arg(\"phi2\"), arg(\"deg\")=false));\n def(\"signed_phase_error\",\n (af::shared(*)(\n af::const_ref const&, af::const_ref const&, bool))\n math::signed_phase_error, (\n arg(\"phi1\"), arg(\"phi2\"), arg(\"deg\")=false));\n def(\"phase_error\",\n (double(*)(\n double const&, double const&, bool))\n math::phase_error, (\n arg(\"phi1\"), arg(\"phi2\"), arg(\"deg\")=false));\n def(\"phase_error\",\n (af::shared(*)(\n af::const_ref const&, af::const_ref const&, bool))\n math::phase_error, (\n arg(\"phi1\"), arg(\"phi2\"), arg(\"deg\")=false));\n def(\"nearest_phase\",\n (double(*)(\n double const&, double const&, bool))\n math::nearest_phase, (\n arg(\"reference\"), arg(\"other\"), arg(\"deg\")=false));\n def(\"nearest_phase\",\n (af::shared(*)(\n af::const_ref const&, af::const_ref const&, bool))\n math::nearest_phase, (\n arg(\"reference\"), arg(\"other\"), arg(\"deg\")=false));\n def(\"divmod\", math::divmod);\n approx_equal_relatively_wrapper::wrap();\n approx_equal_relatively_wrapper >::wrap();\n {\n af::tiny, 3> (*f1)(vec3 const &, vec3 const &,\n bool) = &orthonormal_basis;\n af::tiny, 3> (*f2)(vec3 const &, int,\n vec3 const &, int,\n bool) = &orthonormal_basis;\n def(\"orthonormal_basis\", 
f1, (arg(\"v0\"), arg(\"v1\"),\n arg(\"right_handed\")=true));\n def(\"orthonormal_basis\", f2, (arg(\"v0\"), arg(\"axis_index_1\"),\n arg(\"v1\"), arg(\"axis_index_2\"),\n arg(\"right_handed\")=true));\n }\n def(\"distance_difference_matrix\", distance_difference_matrix, (\n arg(\"sites1\"), arg(\"sites2\")));\n }\n\n}}}} // namespace scitbx::math::boost_python::\n\nBOOST_PYTHON_MODULE(scitbx_math_ext)\n{\n scitbx::math::boost_python::init_module();\n}\n", "meta": {"hexsha": "be2ee7bb1ef119df7682b6b25e106d6c4cccff57", "size": 16667, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "scitbx/math/boost_python/math_ext.cpp", "max_stars_repo_name": "rimmartin/cctbx_project", "max_stars_repo_head_hexsha": "644090f9432d9afc22cfb542fc3ab78ca8e15e5d", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 155.0, "max_stars_repo_stars_event_min_datetime": "2016-11-23T12:52:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:35:44.000Z", "max_issues_repo_path": "scitbx/math/boost_python/math_ext.cpp", "max_issues_repo_name": "rimmartin/cctbx_project", "max_issues_repo_head_hexsha": "644090f9432d9afc22cfb542fc3ab78ca8e15e5d", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 590.0, "max_issues_repo_issues_event_min_datetime": "2016-12-10T11:31:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T23:10:09.000Z", "max_forks_repo_path": "scitbx/math/boost_python/math_ext.cpp", "max_forks_repo_name": "rimmartin/cctbx_project", "max_forks_repo_head_hexsha": "644090f9432d9afc22cfb542fc3ab78ca8e15e5d", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 115.0, "max_forks_repo_forks_event_min_datetime": "2016-11-15T08:17:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T15:30:14.000Z", "avg_line_length": 29.4469964664, "max_line_length": 81, "alphanum_fraction": 0.5971080578, "num_tokens": 4642, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8152325073083131, "lm_q2_score": 0.6113819732941511, "lm_q1q2_score": 0.4984184590116949}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"bte_config.h\"\n#include \"aux/rdtsc_timer.hpp\"\n#include \"collision_tensor/collision_tensor_galerkin.hpp\"\n#include \"collision_tensor/dense/collision_tensor_zlastAM_eigen.hpp\"\n#include \"collision_tensor/dense/multi_slices_factory.hpp\"\n#include \"collision_tensor/dense/storage/vbcrs_sparsity.hpp\"\n#include \"collision_tensor/dense/cluster_vbcrs_sparsity.hpp\"\n#include \"aux/filtered_range.hpp\"\n#include \"spectral/basis/spectral_basis_factory_ks.hpp\"\n\nnamespace po = boost::program_options;\n\n#include \n\nusing namespace std;\nusing namespace boltzmann;\n\ntypedef ct_dense::CollisionTensorZLastAMEigen ct_dense_t;\ntypedef SpectralBasisFactoryKS basis_factory_t;\ntypedef SpectralBasisFactoryKS::basis_type basis_type;\n\n\nint\nmain(int argc, char* argv[])\n{\n std::string version_id = GIT_SHA1;\n cout << \"VersionID: \" << version_id << \"@\" << GIT_BNAME << std::endl;\n\n int K;\n int min_blk_size;\n if (argc < 3) {\n cerr << \"usage: \" << argv[0] << \" K blksize\"\n << \"\\nK: polynomial degree\"\n << \"\\nblksize: minimum admissible block size\"\n << \"\\n\";\n return 1;\n } else {\n K = atoi(argv[1]);\n min_blk_size = atoi(argv[2]);\n cerr << \"K: \" << K << \"\\n\";\n }\n\n // create basis\n basis_type basis;\n SpectralBasisFactoryKS::create(basis, K);\n unsigned int N = basis.n_dofs();\n\n ct_dense::multi_slices_factory::container_t multi_slices;\n ct_dense::multi_slices_factory::create(multi_slices, basis);\n\n std::vector> vbcrs_sparsity_patterns(2 * K - 1);\n int i = 0;\n cout << setw(10) << \"slice id\" << setw(15) << \"msize\" << setw(15) << \"dimz\"\n << \"\\n\";\n for (auto& mslice : multi_slices) {\n auto& vbcrs = vbcrs_sparsity_patterns[i++];\n vbcrs.init(mslice.second.data(), K);\n unsigned int 
msize = vbcrs.memsize();\n cout << setw(10) << i << setw(15) << msize << setw(15) << vbcrs.dimz() << \"\\n\";\n }\n cout << \"----------------------------------------------------------------------\"\n << \"\\n\";\n\n typedef SpectralBasisFactoryKS::elem_t elem_t;\n typedef typename boost::mpl::at_c::type fa_type;\n typename elem_t::Acc::template get fa_accessor;\n // find elements to cluster (combine)\n struct block_t\n {\n int l;\n enum TRIG t;\n int index_first;\n int index_last;\n };\n auto cmp = [&fa_accessor](const elem_t& e, int l, enum TRIG t) {\n auto id = fa_accessor(e).get_id();\n return (id.l == l && TRIG(id.t) == t);\n };\n\n std::vector blocks;\n int offset = 0;\n for (int l = 0; l < K; ++l) {\n for (auto t : {TRIG::COS, TRIG::SIN}) {\n auto range_z =\n filtered_range(basis.begin(), basis.end(), std::bind(cmp, std::placeholders::_1, l, t));\n std::vector elemsz(std::get<0>(range_z), std::get<1>(range_z));\n if (elemsz.size() == 0) continue;\n int size = elemsz.size();\n block_t block = {l, t, offset, offset + size};\n blocks.push_back(block);\n offset += size;\n }\n }\n\n\n struct super_block_t\n {\n int extent = 0;\n int index_first = std::numeric_limits::max();\n int index_last = -1;\n std::vector elems;\n void insert(const block_t& block)\n {\n elems.push_back(block);\n extent += block.index_last - block.index_first;\n index_first = std::min(block.index_first, index_first);\n index_last = std::max(block.index_last, index_last);\n }\n };\n\n std::vector super_blocks;\n\n while (!blocks.empty()) {\n auto elem = blocks.back();\n int extent = elem.index_last - elem.index_first;\n auto& last_super_block = super_blocks.back();\n if (!super_blocks.empty() && last_super_block.extent < min_blk_size) {\n last_super_block.insert(elem);\n } else {\n super_block_t sblock;\n sblock.insert(elem);\n super_blocks.push_back(sblock);\n }\n // remove last elem\n blocks.pop_back();\n }\n\n std::sort(super_blocks.begin(),\n super_blocks.end(),\n [](const super_block_t& 
a, const super_block_t& b) {\n return a.index_first < b.index_first;\n });\n cout << \"found \" << super_blocks.size() << \" super blocks\"\n << \"\\n\";\n\n for (auto sblock : super_blocks) {\n cout << \"sblock.extent: \" << sblock.extent << \" count: \" << sblock.elems.size() << \", \"\n << sblock.index_first << \" -> \" << sblock.index_last << \"\\n\";\n }\n\n ct_dense::MultiSlice::key_t k(4, TRIG::COS);\n ct_dense::VBCRSSparsity<> vbcrs_blocked;\n vbcrs_blocked.init(multi_slices[k].data(), super_blocks);\n cout << \"memreq blocked: \"\n << vbcrs_blocked.memsize() << \"\\n\"\n << \"nblocks: \" << vbcrs_blocked.nblocks() << \"\\n\"\n << \"nrows: \" << vbcrs_blocked.nblock_rows() << \"\\n\";\n\n std::ofstream fout_blocked(\"vbcrs_blocked.dat\");\n vbcrs_blocked.save(fout_blocked);\n fout_blocked.close();\n\n ct_dense::VBCRSSparsity<> vbcrs;\n vbcrs.init(multi_slices[k].data(), K);\n cout << \"memreq: \"\n << vbcrs.memsize() << \"\\n\"\n << \"nblocks: \" << vbcrs.nblocks() << \"\\n\"\n << \"nrows: \" << vbcrs.nblock_rows() << \"\\n\";\n std::ofstream fout(\"vbcrs.dat\");\n vbcrs.save(fout);\n fout.close();\n\n // ==================================================\n // use `cluster_vbcrs_sparsity`\n std::vector> vb_blocked;\n cluster_vbcrs_sparsity::cluster(vb_blocked, /* dst */\n vbcrs_sparsity_patterns,\n multi_slices,\n basis,\n min_blk_size);\n\n return 0;\n}\n", "meta": {"hexsha": "3970f4623dc464f2b4ba49be8938830263e2ddba", "size": 5740, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/collision_tensor_dense/main_vbcrs.cpp", "max_stars_repo_name": "simonpintarelli/2dBoltzmann", "max_stars_repo_head_hexsha": "bc6b7bbeffa242ce80937947444383b416ba3fc9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/collision_tensor_dense/main_vbcrs.cpp", "max_issues_repo_name": "simonpintarelli/2dBoltzmann", 
"max_issues_repo_head_hexsha": "bc6b7bbeffa242ce80937947444383b416ba3fc9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/collision_tensor_dense/main_vbcrs.cpp", "max_forks_repo_name": "simonpintarelli/2dBoltzmann", "max_forks_repo_head_hexsha": "bc6b7bbeffa242ce80937947444383b416ba3fc9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1956521739, "max_line_length": 98, "alphanum_fraction": 0.6120209059, "num_tokens": 1576, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8152324893520001, "lm_q2_score": 0.6113819732941511, "lm_q1q2_score": 0.49841844803352886}} {"text": "#include \"PCE.h\"\n#include \"GeomCommonFunctions.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace PartDesignGui;\n\nLocalCS::LocalCS()\n{\n\torigin = Vector2d(0.0,0.0);\n\taxis_x = Vector2d(1.0,0.0);\n\taxis_y = Math::Functs::RotationAxis2d(axis_x, -Math::Math_PI / 2.0, Vector2d(0.0, 0.0));\n\tMath::Functs::SetVectorLength(axis_x, 1.0);\n\tMath::Functs::SetVectorLength(axis_y, 1.0);\n\tangle = Math::Functs::GetAngleBetween(axis_x, Vector2d(1.0, 0.0));\n\tif (axis_x[1] < 0.0) angle = 2 * M_PI - angle;\n\torigin_max_y = 0.0;\n\torigin_min_y = 0.0;\n\torigin_min_x = 0.0;\n\torigin_max_x = 0.0;\n\toffset_max_y = 0.0;\n\toffset_min_y = 0.0;\n\toffset_min_x = 0.0;\n\toffset_max_x = 0.0;\n}\nLocalCS::LocalCS(const Vector2d& o, const Vector2d& x)\n{\n\torigin = o;\n\taxis_x = x;\n\taxis_y = Math::Functs::RotationAxis2d(axis_x, -Math::Math_PI/2.0, Vector2d(0.0, 0.0));\n\tMath::Functs::SetVectorLength(axis_x, 1.0);\n\tMath::Functs::SetVectorLength(axis_y, 1.0);\n\tangle = Math::Functs::GetAngleBetween(axis_x, Vector2d(1.0, 0.0));\n\tif (axis_x[1] < 
0.0) angle = 2 * M_PI - angle;\n\torigin_max_y = 0.0;\n\torigin_min_y = 0.0;\n\torigin_min_x = 0.0;\n\torigin_max_x = 0.0;\n\toffset_max_y = 0.0;\n\toffset_min_y = 0.0;\n\toffset_min_x = 0.0;\n\toffset_max_x = 0.0;\n}\n\nvoid LocalCS::SetCS(const Vector2d& o, const Vector2d& x)\n{\n\torigin = o;\n\taxis_x = x;\n\taxis_y = Math::Functs::RotationAxis2d(axis_x, -Math::Math_PI / 2.0, Vector2d(0.0, 0.0));\n\tMath::Functs::SetVectorLength(axis_x, 1.0);\n\tMath::Functs::SetVectorLength(axis_y, 1.0);\n\tangle = Math::Functs::GetAngleBetween(axis_x, Vector2d(1.0, 0.0));\n\tif (axis_x[1] < 0.0) angle = 2 * M_PI - angle;\n\torigin_max_y = 0.0;\n\torigin_min_y = 0.0;\n\torigin_min_x = 0.0;\n\torigin_max_x = 0.0;\n\toffset_max_y = 0.0;\n\toffset_min_y = 0.0;\n\toffset_min_x = 0.0;\n\toffset_max_x = 0.0;\n}\n\nVector2d LocalCS::GetLocal(const Vector2d& v) const\n{\n\tVector2d result = v - origin;\n\tresult = Math::Functs::RotationAxis2d(result, angle, Vector2d(0.0, 0.0));\n\treturn result;\n}\n\nVector2d1 LocalCS::GetLocal(const Vector2d1& points) const\n{\n\tVector2d1 results;\n\tfor (auto p : points)\n\t\tresults.emplace_back(GetLocal(p));\n\treturn results;\n}\n\n//need to check\nVector2d LocalCS::GetGlobalPos(const Vector2d& v) const\n{\n\tVector2d result = Math::Functs::RotationAxis2d(v, -angle, Vector2d(0.0, 0.0));\n\tresult = result + origin;\n\treturn result;\n}\n\nVector2d LocalCS::GetGlobalVec(const Vector2d& v) const\n{\n\tVector2d result = Math::Functs::RotationAxis2d(v, -angle, Vector2d(0.0, 0.0));\n\treturn result;\n}\n\nbool LocalCS::GetSegOrigin(const HMODULE& hModule, const double part_match_error, const Vector2d& s, const Vector2d& e, double& min_x, double& max_x) const\n{\n\tauto cgal_2d_inter_line_line = (CGAL_2D_Intersection_Line_Line)GetProcAddress(hModule, \"CGAL_2D_Intersection_Line_Line\");\n\tVector2d low_v = e;\n\tVector2d upper_v = s;\n\tif (s[1] <= e[1])\n\t{\n\t\tlow_v = s;\n\t\tupper_v = e;\n\t}\n\n\t//outside\n\tif (low_v[1] > origin_max_y || 
upper_v[1] < origin_min_y ||\n\t\tMath::Functs::IsAlmostZero_Double(low_v[1] - origin_max_y, part_match_error) || Math::Functs::IsAlmostZero_Double(upper_v[1] - origin_min_y, part_match_error))\n\t\treturn false;\n\n\t//all in\n\tif ((low_v[1] > origin_min_y||Math::Functs::IsAlmostZero_Double(low_v[1]- origin_min_y, part_match_error)) &&\n\t\t(upper_v[1] < origin_max_y||Math::Functs::IsAlmostZero_Double(upper_v[1]- origin_max_y, part_match_error)))\n\t{\n\t\tmin_x = low_v[0];\n\t\tmax_x = upper_v[0];\n\t\tif (low_v[0] > upper_v[0])\n\t\t{\n\t\t\tmin_x = upper_v[0];\n\t\t\tmax_x = low_v[0];\n\t\t}\n\t\treturn true;\n\t}\n\n\t//cutting lower\n\tif (low_v[1]< origin_min_y && upper_v[1]>origin_min_y && (upper_v[1] < origin_max_y||\n\t\tMath::Functs::IsAlmostZero_Double(upper_v[1]- origin_max_y, part_match_error)))\n\t{\n\t\tVector2d inter;\n\t\tif (cgal_2d_inter_line_line(low_v, upper_v, Vector2d(0.0, origin_min_y), Vector2d(1.0, origin_min_y), inter))\n\t\t{\n\t\t\tmin_x = inter[0];\n\t\t\tmax_x = upper_v[0];\n\t\t\tif (inter[0] > upper_v[0])\n\t\t\t{\n\t\t\t\tmin_x = upper_v[0];\n\t\t\t\tmax_x = inter[0];\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\t}\n\n\t//cutting upper\n\tif (upper_v[1] > origin_max_y && (low_v[1] > origin_min_y||Math::Functs::IsAlmostZero_Double(low_v[1]- origin_min_y, part_match_error)) && low_v[1] < origin_max_y)\n\t{\n\t\tVector2d inter;\n\t\tif (cgal_2d_inter_line_line(low_v, upper_v, Vector2d(0.0, origin_max_y), Vector2d(1.0, origin_max_y), inter))\n\t\t{\n\t\t\tmin_x = low_v[0];\n\t\t\tmax_x = inter[0];\n\t\t\tif (low_v[0] > inter[0])\n\t\t\t{\n\t\t\t\tmin_x = inter[0];\n\t\t\t\tmax_x = low_v[0];\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\t}\n\n\n\tif (upper_v[1] > origin_max_y && low_v[1] < origin_min_y)\n\t{\n\t\tVector2d inter_0;\n\t\tbool b0 = cgal_2d_inter_line_line(low_v, upper_v, Vector2d(0.0, origin_min_y), Vector2d(1.0, origin_min_y), inter_0);\n\t\tVector2d inter_1;\n\t\tbool b1 = cgal_2d_inter_line_line(low_v, upper_v, Vector2d(0.0, 
origin_max_y), Vector2d(1.0, origin_max_y), inter_1);\n\n\t\tif (b0 && b1)\n\t\t{\n\t\t\tmin_x = inter_0[0];\n\t\t\tmax_x = inter_1[0];\n\t\t\tif (inter_0[0] > inter_1[0])\n\t\t\t{\n\t\t\t\tmin_x = inter_1[0];\n\t\t\t\tmax_x = inter_0[0];\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\t}\n\treturn false;\n}\n\nbool LocalCS::GetSegOffset(const HMODULE& hModule, const double part_match_error, const Vector2d& s, const Vector2d& e, double& min_x, double& max_x) const\n{\n\tauto cgal_2d_inter_line_line = (CGAL_2D_Intersection_Line_Line)GetProcAddress(hModule, \"CGAL_2D_Intersection_Line_Line\");\n\n\tVector2d low_v = e;\n\tVector2d upper_v = s;\n\tif (s[1] <= e[1])\n\t{\n\t\tlow_v = s;\n\t\tupper_v = e;\n\t}\n\n\t//outside\n\tif (low_v[1] > offset_max_y || upper_v[1] < offset_min_y ||\n\t\tMath::Functs::IsAlmostZero_Double(low_v[1] - offset_max_y, part_match_error) ||\n\t\tMath::Functs::IsAlmostZero_Double(upper_v[1] - offset_min_y, part_match_error))\n\t\treturn false;\n\n\t//all in\n\tif (low_v[1] >= offset_min_y && upper_v[1] <= offset_max_y)\n\t{\n\t\tmin_x = low_v[0];\n\t\tmax_x = upper_v[0];\n\t\tif (low_v[0] > upper_v[0])\n\t\t{\n\t\t\tmin_x = upper_v[0];\n\t\t\tmax_x = low_v[0];\n\t\t}\n\t\treturn true;\n\t}\n\n\t//cutting lower\n\tif (low_v[1]< offset_min_y && upper_v[1]>offset_min_y && upper_v[1] <= offset_max_y)\n\t{\n\t\tVector2d inter;\n\t\tif (cgal_2d_inter_line_line(low_v, upper_v, Vector2d(0.0, offset_min_y), Vector2d(1.0, offset_min_y), inter))\n\t\t{\n\t\t\tmin_x = inter[0];\n\t\t\tmax_x = upper_v[0];\n\t\t\tif (inter[0] > upper_v[0])\n\t\t\t{\n\t\t\t\tmin_x = upper_v[0];\n\t\t\t\tmax_x = inter[0];\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\t}\n\n\t//cutting upper\n\tif (upper_v[1] > offset_max_y && low_v[1] >= offset_min_y && low_v[1] < offset_max_y)\n\t{\n\t\tVector2d inter;\n\t\tif (cgal_2d_inter_line_line(low_v, upper_v, Vector2d(0.0, offset_max_y), Vector2d(1.0, offset_max_y), inter))\n\t\t{\n\t\t\tmin_x = low_v[0];\n\t\t\tmax_x = inter[0];\n\t\t\tif (low_v[0] 
> inter[0])\n\t\t\t{\n\t\t\t\tmin_x = inter[0];\n\t\t\t\tmax_x = low_v[0];\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\t}\n\n\n\tif (upper_v[1] > offset_max_y && low_v[1] < offset_min_y)\n\t{\n\t\tVector2d inter_0;\n\t\tbool b0 = cgal_2d_inter_line_line(low_v, upper_v, Vector2d(0.0, offset_min_y), Vector2d(1.0, offset_min_y), inter_0);\n\t\tVector2d inter_1;\n\t\tbool b1 = cgal_2d_inter_line_line(low_v, upper_v, Vector2d(0.0, offset_max_y), Vector2d(1.0, offset_max_y), inter_1);\n\n\t\tif (b0 && b1)\n\t\t{\n\t\t\tmin_x = inter_0[0];\n\t\t\tmax_x = inter_1[0];\n\t\t\tif (inter_0[0] > inter_1[0])\n\t\t\t{\n\t\t\t\tmin_x = inter_1[0];\n\t\t\t\tmax_x = inter_0[0];\n\t\t\t}\n\t\t\treturn true;\n\t\t}\n\t}\n\treturn false;\n}\n\nVector2d1 LocalCS::SortX(const Vector2d1& segs) const\n{\n\tVector2d1 sort_segs;\n\tif (segs.size() == 0) return sort_segs;\n\tdouble minimal_x = segs[0][0];\n\tdouble maximal_x = segs[0][1];\n\n\tfor (int iter = 0; iter < segs.size(); iter++)\n\t{\n\t\tauto& seg = segs[iter];\n\t\tif (seg[0] >= minimal_x && seg[0] <= maximal_x)\n\t\t{\n\t\t\tif (seg[1] > maximal_x)\n\t\t\t\tmaximal_x = seg[1];\n\t\t}\n\t\telse\n\t\t{\n\t\t\tsort_segs.emplace_back(minimal_x, maximal_x);\n\t\t\tminimal_x = segs[iter][0];\n\t\t\tmaximal_x = segs[iter][1];\n\t\t}\n\n\t\tif (iter == segs.size() - 1)\n\t\t{\n\t\t\tsort_segs.emplace_back(minimal_x, maximal_x);\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn sort_segs;\n}\n\nVector2d1 LocalCS::EmptyX(const Vector2d1& sort_segs) const\n{\n\tVector2d1 empty_segs;\n\tfor (int i = 1; i < sort_segs.size(); i++)\n\t\tempty_segs.emplace_back(sort_segs[i - 1][1], sort_segs[i][0]);\n\treturn empty_segs;\n}", "meta": {"hexsha": "18e738ea586b8373f26ac80a06f24bb78ac7bf82", "size": 8079, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Gui/PCE_CS.cpp", "max_stars_repo_name": "haisenzhao/CarpentryCompiler", "max_stars_repo_head_hexsha": "c9714310b7ce7523a25becd397265bfaa3ab7ea3", "max_stars_repo_licenses": ["FSFAP"], "max_stars_count": 21.0, 
"max_stars_repo_stars_event_min_datetime": "2019-12-06T09:57:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-22T12:58:09.000Z", "max_issues_repo_path": "Gui/PCE_CS.cpp", "max_issues_repo_name": "haisenzhao/CarpentryCompiler", "max_issues_repo_head_hexsha": "c9714310b7ce7523a25becd397265bfaa3ab7ea3", "max_issues_repo_licenses": ["FSFAP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Gui/PCE_CS.cpp", "max_forks_repo_name": "haisenzhao/CarpentryCompiler", "max_forks_repo_head_hexsha": "c9714310b7ce7523a25becd397265bfaa3ab7ea3", "max_forks_repo_licenses": ["FSFAP"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2020-11-18T00:09:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-13T04:40:47.000Z", "avg_line_length": 26.0612903226, "max_line_length": 164, "alphanum_fraction": 0.6712464414, "num_tokens": 3063, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8152324893519999, "lm_q2_score": 0.6113819732941511, "lm_q1q2_score": 0.49841844803352875}} {"text": "/*\n For more information, please see: http://software.sci.utah.edu\n\n The MIT License\n\n Copyright (c) 2015 Scientific Computing and Imaging Institute,\n University of Utah.\n\n \n Permission is hereby granted, free of charge, to any person obtaining a\n copy of this software and associated documentation files (the \"Software\"),\n to deal in the Software without restriction, including without limitation\n the rights to use, copy, modify, merge, publish, distribute, sublicense,\n and/or sell copies of the Software, and to permit persons to whom the\n Software is furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included\n in all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n DEALINGS IN THE SOFTWARE.\n*/\n/// @todo Documentation Core/Datatypes/Legacy/Matrix/DenseMatrix.cc\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n\n#include \n\n#include \n\n#include \n\nnamespace SCIRun {\n\n// NOTE: returns 1 if successful, or 0 if unsuccessful (i.e. ignore the solution vector)\nint\nLinearAlgebra::solve(const DenseMatrix& matrix, ColumnMatrix& sol)\n{\n ColumnMatrix b(sol);\n return solve(matrix, b, sol);\n}\n\n\n// NOTE: returns 1 if successful, or 0 if unsuccessful (i.e. 
ignore the solution vector)\nint\nLinearAlgebra::solve(const DenseMatrix& matrix, const ColumnMatrix& rhs, ColumnMatrix& lhs)\n{\n ASSERT(matrix.nrows() == matrix.ncols());\n ASSERT(rhs.nrows() == matrix.ncols());\n lhs=rhs;\n\n double **A;\n double **cpy = 0;\n double *lhsp = lhs.get_data_pointer();\n\t\n cpy = new double*[matrix.nrows()]; \n\n for (index_type j=0; j < matrix.nrows(); j++)\n cpy[j] = matrix.get_raw_2D_pointer()[j];\n\n A = cpy;\n\n // Gauss-Jordan with partial pivoting\n index_type i;\n for (i=0; i < matrix.nrows(); i++)\n {\n double max=Abs(A[i][i]);\n index_type row=i;\n index_type j;\n for (j=i+1; j < matrix.nrows(); j++)\n {\n if(Abs(A[j][i]) > max)\n {\n max=Abs(A[j][i]);\n row=j;\n }\n }\n // ASSERT(Abs(max) > 1.e-12);\n if (Abs(max) < 1.e-12)\n {\n lhs=rhs;\n delete cpy;\n return 0;\n }\n if(row != i)\n {\n // Switch rows (actually their pointers)\n std::swap(A[i], A[row]);\n std::swap(lhsp[i], lhsp[row]);\n }\n double denom=1./A[i][i];\n double* r1=A[i];\n double s1=lhsp[i];\n for (j=i+1; j 1.e-12);\n if (Abs(A[i][i]) < 1.e-12)\n {\n lhs=rhs;\n delete cpy;\n return 0;\n }\n double denom=1./A[i][i];\n double* r1=A[i];\n double s1=lhsp[i];\n for (index_type j=0;j 1.e-12);\n if (Abs(A[i][i]) < 1.e-12)\n {\n lhs=rhs;\n delete cpy;\n return 0;\n }\n double factor=1./A[i][i];\n for (index_type j=0; j sigma(new double[SIGMA_LEN]);\n\n try\n {\n lapacksvd(matrix.get_raw_2D_pointer(), matrix.nrows(), matrix.ncols(), sigma.get(), U.get_raw_2D_pointer(), VT.get_raw_2D_pointer());\n }\n catch (const SCIRun::LapackError& exception)\n {\n std::ostringstream oss;\n oss << \"Caught LapackError exception: \" << exception.message();\n // in the absence of a logging service\n std::cerr << oss.str() << std::endl;\n throw;\n }\n\n size_type nnz = 0;\n for (size_type i = 0; i < SIGMA_LEN; ++i)\n {\n if ( fabs(sigma[i]) >= std::numeric_limits::epsilon() )\n {\n ++nnz;\n }\n }\n\n SparseRowMatrix::Data sparseData(matrix.nrows() + 1, nnz);\n const 
SparseRowMatrix::Rows& rows = sparseData.rows();\n const SparseRowMatrix::Columns& columns = sparseData.columns();\n const SparseRowMatrix::Storage& a = sparseData.data();\n if (!sparseData.allocated())\n {\n std::cerr << \"Could not allocate memory for sparse matrix buffers \"\n << __FILE__ << \": \" << __LINE__ << std::endl;\n\n return;\n }\n\n // singular values on diagonal\n index_type count = 0;\n index_type i = 0;\n const index_type NROWS = matrix.nrows();\n for (index_type r = 0; r < NROWS; r++)\n {\n rows[r] = count;\n if ( r < matrix.ncols() && i < SIGMA_LEN && fabs(sigma[i]) >= std::numeric_limits::epsilon() )\n {\n columns[count] = r;\n a[count] = sigma[i];\n ++count;\n ++i;\n }\n }\n rows[matrix.nrows()] = count;\n \n S = new SparseRowMatrix(matrix.nrows(), matrix.ncols(), sparseData, nnz);\n}\n\nvoid\nLinearAlgebra::svd(const DenseMatrix& matrix, DenseMatrix& U, DenseMatrix& S, DenseMatrix& VT)\n{\n ASSERTEQ(U.ncols(), U.nrows());\n ASSERTEQ(VT.ncols(), VT.nrows());\n ASSERTEQ(U.nrows(), matrix.nrows());\n ASSERTEQ(VT.ncols(), matrix.ncols());\n ASSERTEQ(S.nrows(), matrix.nrows());\n ASSERTEQ(S.ncols(), matrix.ncols());\n\n /*\n * LAPACK function dgesvd argument S is a DOUBLE PRECISION array\n * with dimension (min(M,N)).\n */\n const size_type SIGMA_LEN = std::min(matrix.nrows(), matrix.ncols());\n boost::shared_array sigma(new double[SIGMA_LEN]);\n\n try\n {\n lapacksvd(matrix.get_raw_2D_pointer(), matrix.nrows(), matrix.ncols(), sigma.get(), U.get_raw_2D_pointer(), VT.get_raw_2D_pointer());\n }\n catch (const SCIRun::LapackError& exception)\n {\n std::ostringstream oss;\n oss << \"Caught LapackError exception: \" << exception.message();\n // in the absence of a logging service\n std::cerr << oss.str() << std::endl;\n throw;\n }\n\n // Put singular values on diagonal.\n for (size_type i = 0; i < SIGMA_LEN; ++i)\n S.put(i, i, sigma[i]);\n}\n\nvoid\nLinearAlgebra::svd(const DenseMatrix& matrix, DenseMatrix& U, ColumnMatrix& S, DenseMatrix& VT)\n{\n // 
Check whether matrices are square\n if (U.ncols() != U.nrows())\n {\n SCI_THROW(DimensionMismatch(U.ncols(), U.nrows(), __FILE__, __LINE__));\n }\n \n if (VT.ncols() != VT.nrows())\n {\n SCI_THROW(DimensionMismatch(VT.ncols(), VT.nrows(), __FILE__, __LINE__));\n }\n \n if (U.nrows())\n {\n if (U.nrows() != matrix.nrows())\n {\n SCI_THROW(DimensionMismatch(U.nrows(), matrix.nrows(), __FILE__, __LINE__));\n } \n }\n if (VT.ncols())\n {\n if (VT.ncols() != matrix.ncols())\n {\n SCI_THROW(DimensionMismatch(VT.ncols(), matrix.ncols(), __FILE__, __LINE__));\n }\n }\n \n if (matrix.nrows() < matrix.ncols())\n {\n if (S.nrows() != matrix.nrows())\n {\n SCI_THROW(DimensionMismatch(S.nrows(), matrix.nrows(), __FILE__, __LINE__));\n }\n }\n else\n {\n if (S.nrows() != matrix.ncols())\n {\n SCI_THROW(DimensionMismatch(S.nrows(), matrix.ncols(), __FILE__, __LINE__));\n }\n }\n\n try\n {\n if (U.nrows() == 0 && VT.nrows() == 0)\n lapacksvd(matrix.get_raw_2D_pointer(), matrix.nrows(), matrix.ncols(), S.get_data_pointer(), 0, 0);\n else if (U.nrows() != 0 && VT.nrows() == 0)\n lapacksvd(matrix.get_raw_2D_pointer(), matrix.nrows(), matrix.ncols(), S.get_data_pointer(), U.get_raw_2D_pointer(), 0);\n else if (U.nrows() == 0 && VT.nrows() != 0)\n lapacksvd(matrix.get_raw_2D_pointer(), matrix.nrows(), matrix.ncols(), S.get_data_pointer(), 0, VT.get_raw_2D_pointer());\n else \n lapacksvd(matrix.get_raw_2D_pointer(), matrix.nrows(), matrix.ncols(), S.get_data_pointer(), U.get_raw_2D_pointer(), VT.get_raw_2D_pointer());\n }\n catch (const SCIRun::LapackError& exception)\n {\n std::ostringstream oss;\n oss << \"Caught LapackError exception: \" << exception.message();\n // in the absence of a logging service\n std::cerr << oss.str() << std::endl;\n throw;\n }\n}\n\nvoid\nLinearAlgebra::eigenvalues(const DenseMatrix& matrix, ColumnMatrix& R, ColumnMatrix& I)\n{\n ASSERTEQ(matrix.ncols(), matrix.nrows());\n ASSERTEQ(R.nrows(), I.nrows());\n ASSERTEQ(matrix.ncols(), R.nrows());\n\n 
boost::shared_array Er(new double[matrix.nrows()]);\n boost::shared_array Ei(new double[matrix.nrows()]);\n\n try\n {\n lapackeigen(matrix.get_raw_2D_pointer(), matrix.nrows(), Er.get(), Ei.get());\n }\n catch (const SCIRun::LapackError& exception)\n {\n std::ostringstream oss;\n oss << \"Caught LapackError exception: \" << exception.message();\n // in the absence of a logging service\n std::cerr << oss.str() << std::endl;\n throw;\n }\n\n for (index_type i = 0; i < matrix.nrows(); i++)\n {\n R[i] = Er[i];\n I[i] = Ei[i];\n }\n}\n\nvoid\nLinearAlgebra::eigenvectors(const DenseMatrix& matrix, ColumnMatrix& R, ColumnMatrix& I, DenseMatrix& Vecs)\n{\n ASSERTEQ(matrix.ncols(), matrix.nrows());\n ASSERTEQ(R.nrows(), I.nrows());\n ASSERTEQ(matrix.ncols(), R.nrows());\n\n boost::shared_array Er(new double[matrix.nrows()]);\n boost::shared_array Ei(new double[matrix.nrows()]);\n\n try\n {\n lapackeigen(matrix.get_raw_2D_pointer(), matrix.nrows(), Er.get(), Ei.get(), Vecs.get_raw_2D_pointer());\n }\n catch (const SCIRun::LapackError& exception)\n {\n std::ostringstream oss;\n oss << \"Caught LapackError exception: \" << exception.message();\n // in the absence of a logging service\n std::cerr << oss.str() << std::endl;\n throw;\n }\n\n\n for (index_type i = 0; i\n#include \n#include \n#include \n#include \n#include \n\n// Dependencies required for eigenvalue computation\n#include \n#include \n#include \n#include \n#include \n\nusing namespace Rodin;\nusing namespace Rodin::Variational;\nusing namespace Rodin::External;\n\nEigen::SparseMatrix mfemToEigenSparse(mfem::SparseMatrix m)\n{\n typedef Eigen::Triplet Triplet;\n\n int nonZero = m.NumNonZeroElems();\n std::vector tripletList;\n\n // Get the values (i,j, data)\n int* m_I = m.GetI();\n int* m_J = m.GetJ();\n double* m_data = m.GetData();\n\n for(int r = 0; r < m.NumRows(); ++r){\n for(int j=m_I[r]; j m_sparse(m.NumRows(), m.NumCols());\n m_sparse.setFromTriplets(tripletList.begin(), tripletList.end()); //SEGFAULT\n\n 
return m_sparse;\n}\n\n\nclass EigenSolver{\n\n public:\n // Constructors\n EigenSolver();\n\n // Destructor\n ~EigenSolver() = default;\n\n // Accessors\n GridFunction

& getEigenFunction(int index);\n\n double getEigenValue(int index);\n std::vector getEigenValues();\n\n // Options\n EigenSolver& setNumEV(unsigned int nev);\n EigenSolver& setShift(double shift);\n\n // Solves the problem\n void solve(mfem::SparseMatrix A, mfem::SparseMatrix B, FiniteElementSpace

& fes);\n\n private:\n std::vector m_eigenvalues;\n std::vector*> m_eigenfunctions;\n unsigned int m_nev;\n double m_shift;\n};\n\n\n\nint main(int argc, char** argv)\n{\n const char* meshFile = \"../resources/mfem/levelset-cantilever2d-example.mesh\";\n\n // Define interior and exterior for level set discretization\n int Interior = 1, Exterior = 2;\n\n // Define boundary attributes\n int Gamma0 = 1, GammaD = 2, GammaN = 3, Gamma = 4;\n\n // Load mesh\n Mesh Omega;\n Omega.load(meshFile);\n\n // Solver for hilbertian regularization\n auto solver = Solver::UMFPack();\n\n // Optimization parameters\n size_t maxIt = 100;\n double eps = 1e-6;\n double hmax = 0.05;\n int k = 1; // The eigenvalue to optimize\n auto alpha = ScalarFunction(4 * hmax * hmax); // Parameter for hilbertian regularization\n\n std::vector obj;\n\n /*\n // Scalar field finite element space over the whole domain\n FiniteElementSpace

Vh(Omega);\n\n // Trim the exterior part of the mesh to solve the elasticity system\n SubMesh trimmed = Omega.trim(Exterior, Gamma);\n\n // Build a finite element space over the trimmed mesh\n FiniteElementSpace

VhInt(trimmed);\n\n // Elasticity equation\n TrialFunction uInt(VhInt);\n TestFunction vInt(VhInt);\n\n // A\n Problem stiffness(uInt, vInt);\n stiffness = Integral(Grad(uInt), Grad(vInt));\n stiffness.update().assemble();\n\n // B\n Problem mass(uInt, vInt);\n mass = Integral(uInt, vInt);\n mass.update().assemble();\n\n auto& m1 = stiffness.getStiffnessMatrix();\n auto& m2 = mass.getStiffnessMatrix();\n\n\n // Solve eigenvalue problem\n EigenSolver ES;\n ES.setShift(1.0).setNumEV(k+4).solve(m1, m2, VhInt);\n\n // Get solution and transfer to original domain\n GridFunction u(Vh);\n ES.getEigenFunction(k).transfer(u);\n double mu = ES.getEigenValue(k);\n\n // Save solution\n u.save(\"u.gf\");\n Omega.save(\"Omega.mesh\");\n */\n\n // Optimization loop\n for (size_t i = 0; i < maxIt; i++)\n {\n // Scalar field finite element space over the whole domain\n FiniteElementSpace

Vh(Omega);\n\n // Trim the exterior part of the mesh to solve the elasticity system\n SubMesh trimmed = Omega.trim(Exterior, Gamma);\n\n // Build a finite element space over the trimmed mesh\n FiniteElementSpace

VhInt(trimmed);\n\n // Elasticity equation\n TrialFunction uInt(VhInt);\n TestFunction vInt(VhInt);\n\n // A\n Problem stiffness(uInt, vInt);\n stiffness = Integral(Grad(uInt), Grad(vInt));\n stiffness.update().assemble();\n\n // B\n Problem mass(uInt, vInt);\n mass = Integral(uInt, vInt);\n mass.update().assemble();\n\n auto& m1 = stiffness.getStiffnessMatrix();\n auto& m2 = mass.getStiffnessMatrix();\n\n\n // Solve eigenvalue problem\n EigenSolver ES;\n ES.setShift(1.0).setNumEV(k+4).solve(m1, m2, VhInt);\n\n // Get solution and transfer to original domain\n GridFunction u(Vh);\n ES.getEigenFunction(k).transfer(u);\n double mu = ES.getEigenValue(k);\n\n // Compute the boundary shape gradientv\n // Vector field finite element space over the whole domain\n FiniteElementSpace

Uh(Omega, 2);\n auto n = Normal(2);\n\n // GridFunction dJ(Uh);\n //dJ = Omega.getVolume(Interior)*(Dot(Grad(u), Grad(u)) + mu*Dot(u,u))*n + mu*n; // /Integral(Dot(u,u)) sur le sous-mesh\n //std::cout << dJ.getFiniteElementSpace().getVectorDimension() << std::endl;\n\n // Note from Carlos: 26/Avril/2022\n // Salut !\n // 1. I changed some things around, in particular I do not project the\n // expression for dJ on a GridFunction. Instead I wrote the thing directly\n // and hope that Rodin understands what I want hehe\n // 2. I fixed the bug in the transfer function, thank you for testing this\n // and narrowing down the problem! It helps a lot :)\n // 3. I tried to run your code and it seems that the objective goes down\n // but at one point the computation fails. I think due to parasitic\n // components.\n // 4. In the hilbertian procedure I removed the use of DirichletBC and used\n // the usual expression.\n //\n // See you soon!\n\n // Hilbert extension-regularization procedure\n // This is bullshit\n auto dJ = Dot(Grad(u).traceOf(Interior), Grad(u).traceOf(Interior)) * n;\n TrialFunction g(Uh);\n TestFunction v(Uh);\n Problem hilbert(g, v);\n hilbert = Integral(alpha * Jacobian(g), Jacobian(v))\n + Integral(g, v)\n + BoundaryIntegral(dJ, v).over(Gamma);\n solver.solve(hilbert);\n\n // Save data to inspect\n // Omega.save(\"Omegai.mesh\");\n // g.getGridFunction().save(\"g.gf\");\n\n // Update objective\n obj.push_back(\n mu * Omega.getVolume(Interior));\n std::cout << \"[\" << i << \"] Objective: \" << obj.back() << std::endl;\n\n // Convert data types to mmg types\n auto mmgMesh = Cast(Omega).to();\n auto mmgVel = Cast(g.getGridFunction()).to(mmgMesh);\n\n // Generate signed distance function\n auto mmgLs = MMG::Distancer2D().setInteriorDomain(Interior).distance(mmgMesh);\n\n // Advect the level set function\n double gInf = std::max(g.getGridFunction().max(), -g.getGridFunction().min());\n double dt = hmax / gInf;\n MMG::Advect2D(mmgLs, mmgVel).step(dt);\n\n // 
Recover the implicit domain\n auto mmgImplicit =\n MMG::ImplicitDomainMesher2D().split(Interior, {Interior, Exterior})\n .split(Exterior, {Interior, Exterior})\n .setRMC(1e-3)\n .setHMax(hmax)\n .setBoundaryReference(Gamma)\n .discretize(mmgLs);\n\n // Convert back to Rodin data type\n Omega = Cast(mmgImplicit).to>();\n\n // Save mesh\n Omega.save(\"Omega.mesh\");\n\n // Test for convergence\n if (obj.size() >= 2 && abs(obj[i] - obj[i - 1]) < eps)\n {\n std::cout << \"Convergence!\" << std::endl;\n break;\n }\n\n std::ofstream plt(\"obj.txt\", std::ios::trunc);\n for (size_t i = 0; i < obj.size(); i++)\n plt << i << \",\" << obj[i] << \"\\n\";\n }\n\n\n\n return 0;\n}\n\n\n// Constructor\nEigenSolver::EigenSolver(){\n m_nev = 0;\n m_shift = 0.0;\n}\n\n// Accessors\nGridFunction

& EigenSolver::getEigenFunction(int index){\n return *m_eigenfunctions[index];\n}\n\ndouble EigenSolver::getEigenValue(int index){\n return m_eigenvalues[index];\n}\n\nstd::vector EigenSolver::getEigenValues(){\n return m_eigenvalues;\n}\n\n// Options\nEigenSolver& EigenSolver::setNumEV(unsigned int nev){\n m_nev = nev;\n return *this;\n}\n\nEigenSolver& EigenSolver::setShift(double shift){\n m_shift = shift;\n\n return *this;\n}\n\nvoid EigenSolver::solve(mfem::SparseMatrix A,mfem::SparseMatrix B, FiniteElementSpace

& fes){\n\n // 1. CA and B to Eigen::SparseMatrix\n //WARNING: MAY BE USELESS SINCE SPECTRA HANDLE OTHER MATRICES TYPES\n //BUT MAY BE FASTER. IN FACT I DON'T HAVE A CLUE.\n Eigen::SparseMatrix A_sparse = mfemToEigenSparse(A);\n Eigen::SparseMatrix B_sparse = mfemToEigenSparse(B);\n\n // 2. Use Spectra with the shift-inverse method to compute eigenvalue\n // Define the shift-inverse and vector-multiplication operations\n using OpType = Spectra::SymShiftInvert;\n using BOpType = Spectra::SparseSymMatProd;\n OpType op(A_sparse, B_sparse);\n BOpType Bop(B_sparse);\n\n // Construct the generalized eigensolver object\n int ncv = 2*m_nev+1;\n Spectra::SymGEigsShiftSolver\n geigs(op, Bop, m_nev, ncv, m_shift);\n\n\n // Initialize and compute\n geigs.init();\n int nconv = geigs.compute(Spectra::SortRule::LargestMagn); // UTILISER LARGESTMAGN ET PAS SMALLESTMAGN CAR ON EST EN SHIFT-INVERSE MODE\n\n // Retrieve results\n Eigen::VectorXd evalues;\n Eigen::MatrixXd evecs;\n if (geigs.info() == Spectra::CompInfo::Successful)\n {\n evalues = geigs.eigenvalues();\n evecs = geigs.eigenvectors();\n }\n\n\n // Store in m_eigenvalues and m_eigenfunctions\n // (and reverse the order to order from lowest to higest)\n int dim = evecs.rows();\n\n for(int i = 0; i < m_nev; i++){\n m_eigenvalues.push_back(evalues[m_nev-i-1]);\n\n std::unique_ptr data(new double[dim]);\n for(int j=0; j(fes));\n m_eigenfunctions[i]->setData(std::move(data), dim);\n }\n}\n", "meta": {"hexsha": "c99a9b661a119ab6f814dafb3325a23841d01a2c", "size": 10108, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "examples/ShapeOptimization/LevelSetEigenvalue2D.cpp", "max_stars_repo_name": "carlos-brito-pacheco/rodin", "max_stars_repo_head_hexsha": "f2c946b290ebb2487a21c617de01be91a0692c72", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-12-02T19:04:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-02T19:04:38.000Z", "max_issues_repo_path": 
"examples/ShapeOptimization/LevelSetEigenvalue2D.cpp", "max_issues_repo_name": "cbritopacheco/rodin", "max_issues_repo_head_hexsha": "f2c946b290ebb2487a21c617de01be91a0692c72", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/ShapeOptimization/LevelSetEigenvalue2D.cpp", "max_forks_repo_name": "cbritopacheco/rodin", "max_forks_repo_head_hexsha": "f2c946b290ebb2487a21c617de01be91a0692c72", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6345609065, "max_line_length": 137, "alphanum_fraction": 0.6612584092, "num_tokens": 2843, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.815232489352, "lm_q2_score": 0.611381973294151, "lm_q1q2_score": 0.4984184480335287}} {"text": "#ifndef TVMTL_MANIFOLD_SPD_HPP\n#define TVMTL_MANIFOLD_SPD_HPP\n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n\n//own includes\n#include \"enumerators.hpp\"\n#include \"matrix_utils.hpp\"\n\nnamespace tvmtl {\n\n// Specialization SPD\ntemplate \nstruct Manifold< SPD, N> {\n \n public:\n\tstatic const MANIFOLD_TYPE MyType;\n\tstatic const int manifold_dim ;\n\tstatic const int value_dim; // TODO: maybe rename to embedding_dim \n\n\tstatic const bool non_isometric_embedding;\n\t\n\t// Scalar type of manifold\n\ttypedef double scalar_type;\n\ttypedef double dist_type;\n\ttypedef std::complex complex_type;\n\ttypedef std::vector weight_list; \n\n\t// Value Typedef\n\ttypedef Eigen::Matrix< scalar_type, N, N>\t\t\t\tvalue_type;\n\ttypedef value_type&\t\t\t\t\t\t\tref_type;\n\ttypedef const value_type&\t\t\t\t\t\tcref_type;\n\ttypedef std::vector >\tvalue_list; \n\t\n\t// Tangent space typedefs\n\ttypedef Eigen::Matrix tm_base_type;\n\ttypedef 
tm_base_type&\t\t\t\t\t tm_base_ref_type;\n\n\t// Derivative Typedefs\n\ttypedef value_type\t\t\t deriv1_type;\n\ttypedef deriv1_type&\t\t\t deriv1_ref_type;\n\t\n\ttypedef Eigen::Matrix\t\t\t\tderiv2_type;\n\ttypedef deriv2_type&\t\t\t\t\t\t\tderiv2_ref_type;\n\ttypedef\tEigen::Matrix\trestricted_deriv2_type;\n\n\n\t// Manifold distance functions (for IRLS)\n\tinline static dist_type dist_squared(cref_type x, cref_type y);\n\tinline static void deriv1x_dist_squared(cref_type x, cref_type y, deriv1_ref_type result);\n\tinline static void deriv1y_dist_squared(cref_type x, cref_type y, deriv1_ref_type result);\n\n\tinline static void deriv2xx_dist_squared(cref_type x, cref_type y, deriv2_ref_type result);\n\tinline static void deriv2xy_dist_squared(cref_type x, cref_type y, deriv2_ref_type result);\n\tinline static void deriv2yy_dist_squared(cref_type x, cref_type y, deriv2_ref_type result);\n\n\n\t// Manifold exponentials und logarithms ( for Proximal point)\n\ttemplate \n\tinline static void exp(const Eigen::MatrixBase& x, const Eigen::MatrixBase& y, Eigen::MatrixBase& result);\n\tinline static void log(cref_type x, cref_type y, ref_type result);\n\n\tinline static void convex_combination(cref_type x, cref_type y, double t, ref_type result);\n\n\t// Implementations of the Karcher mean\n\t// Slow list version\n\tinline static void karcher_mean(ref_type x, const value_list& v, double tol=1e-10, int maxit=15);\n\tinline static void weighted_karcher_mean(ref_type x, const weight_list& w, const value_list& v, double tol=1e-10, int maxit=15);\n\t// Variadic templated version\n\ttemplate \n\tinline static void karcher_mean(V& x, const Args&... args);\n\ttemplate \n\tinline static void variadic_karcher_mean_gradient(V& x, const V& y);\n\ttemplate \n\tinline static void variadic_karcher_mean_gradient(V& x, const V& y1, const Args&... 
args);\n\t\n\t// Basis transformation for restriction to tangent space\n\tinline static void tangent_plane_base(cref_type x, tm_base_ref_type result);\n\n\t// Projection\n\tinline static void projector(ref_type x);\n\n\t// Interpolation pre- and postprocessing\n\tinline static void interpolation_preprocessing(ref_type x);\n\tinline static void interpolation_postprocessing(ref_type x);\n\n};\n\n\n/*-----IMPLEMENTATION SPD----------*/\n\n// Static constants, Outside definition to avoid linker error\n\ntemplate \nconst MANIFOLD_TYPE Manifold < SPD, N>::MyType = SPD; \n\ntemplate \nconst int Manifold < SPD, N>::manifold_dim = N * (N + 1) / 2; \n\ntemplate \nconst int Manifold < SPD, N>::value_dim = N * N; \n\ntemplate \nconst bool Manifold < SPD, N>::non_isometric_embedding = true; \n\n\n// Squared SPD distance function\ntemplate \ninline typename Manifold < SPD, N>::dist_type Manifold < SPD, N>::dist_squared( cref_type x, cref_type y ){\n #ifdef TV_SPD_DIST_DEBUG\n\tstd::cout << \"\\nDist2 function with x=\\n\" << x << \"\\nand y=\\n\" << y << std::endl;\n #endif\n// NOTE: If x is not strictly spd, using LDLT completely halts the algorithm\n/* value_type sqrtX = x.sqrt();\n Eigen::LDLT ldlt;\n ldlt.compute(sqrtX);\n\n value_type Z = ldlt.solve(y).transpose();\t\n return ldlt.solve(Z).transpose().log().squaredNorm();\t*/\n value_type invsqrt = x.sqrt().inverse();\n return (invsqrt * y * invsqrt).log().squaredNorm();\n}\n\n\n// Derivative of Squared SPD distance w.r.t. first argument\n// TODO: Switch to solve() for N>4?\ntemplate \ninline void Manifold < SPD, N>::deriv1x_dist_squared( cref_type x, cref_type y, deriv1_ref_type result){\n value_type invsqrt = x.sqrt().inverse();\n result = -2.0 * invsqrt * (invsqrt * y * invsqrt).log() * invsqrt;\n}\n// Derivative of Squared SPD distance w.r.t. 
second argument\ntemplate \ninline void Manifold < SPD, N>::deriv1y_dist_squared( cref_type x, cref_type y, deriv1_ref_type result){\n deriv1x_dist_squared(y, x, result);\n}\n\n\n// Second Derivative of Squared SPD distance w.r.t first argument\ntemplate \ninline void Manifold < SPD, N>::deriv2xx_dist_squared( cref_type x, cref_type y, deriv2_ref_type result){\n value_type T2, T3, T4;\n T2 = x.sqrt().inverse();\n T3 = T2 * y * T2;\n T4 = T3.log();\n\n deriv2_type dlog, dsqrt;\n KroneckerDLog(T3, dlog);\n KroneckerDSqrt(x, dsqrt);\n\n deriv2_type T2T4tId, IdT2T4, T2T2, T2yId, IdT2y;\n T2T4tId = Eigen::kroneckerProduct(T2 * T4.transpose(), value_type::Identity());\n IdT2T4 = Eigen::kroneckerProduct(value_type::Identity(), T2 * T4);\n T2T2 = Eigen::kroneckerProduct(T2, T2);\n T2yId = Eigen::kroneckerProduct(T2 * y, value_type::Identity());\n IdT2y = Eigen::kroneckerProduct(value_type::Identity(), T2 * y);\n\n result = 2 * (T2T4tId + IdT2T4 + T2T2 * dlog * (T2yId + IdT2y) ) * T2T2 * dsqrt;\n}\n// Second Derivative of Squared SPD distance w.r.t first and second argument\ntemplate \ninline void Manifold < SPD, N>::deriv2xy_dist_squared( cref_type x, cref_type y, deriv2_ref_type result){\n value_type isqrtX, T1;\n isqrtX = x.sqrt().eval().inverse();\n T1 = isqrtX * y * isqrtX;\n\n deriv2_type kp_isqrtX, dlog;\n kp_isqrtX = Eigen::kroneckerProduct(isqrtX, isqrtX);\n KroneckerDLog(T1, dlog);\n\n result = -2 * kp_isqrtX * dlog * kp_isqrtX;\n}\n// Second Derivative of Squared SPD distance w.r.t second argument\ntemplate \ninline void Manifold < SPD, N>::deriv2yy_dist_squared( cref_type x, cref_type y, deriv2_ref_type result){\n deriv2xx_dist_squared(y, x, result);\n}\n\n\n\n// Exponential and Logarithm Map\ntemplate \ntemplate \ninline void Manifold ::exp(const Eigen::MatrixBase& x, const Eigen::MatrixBase& y, Eigen::MatrixBase& result){\n #ifdef TV_SPD_EXP_DEBUG\n\tstd::cout << \"\\nEXP function with x=\\n\" << x << \"\\nand y=\\n\" << y << std::endl;\n #endif\n value_type 
sqrtX = x.sqrt();\n value_type Z = sqrtX.ldlt().solve(y).transpose();\t\n result = sqrtX * sqrtX.transpose().ldlt().solve(Z).exp() * sqrtX;\t\n}\n\ntemplate \ninline void Manifold ::log(cref_type x, cref_type y, ref_type result){\n #ifdef TV_SPD_LOG_DEBUG\n\tstd::cout << \"\\nLOG function with x=\\n\" << x << \"\\nand y=\\n\" << y << std::endl;\n #endif\n value_type sqrtX = x.sqrt();\n value_type Z = sqrtX.ldlt().solve(y).transpose();\t\n result = sqrtX * sqrtX.transpose().ldlt().solve(Z).log() * sqrtX;\t\n}\n\n// Tangent Plane restriction\ntemplate \ninline void Manifold ::tangent_plane_base(cref_type x, tm_base_ref_type result){\n int d = value_type::RowsAtCompileTime;\n int k = 0;\n \n value_type S, T;\n S = x.sqrt();\n\n for(int i=0; i(T.data(), T.size());\n\t++k;\n }\n\n for(int i=0; i(T.data(), T.size());\n\t ++k;\n\t}\n}\n\ntemplate \ninline void Manifold ::projector(ref_type x){\n // does not exist since SPD is an open set\n // TODO: Eventually implement projection to semi positive definite matrices\n}\n\n// Convex geodesic combinations\ntemplate \ninline void Manifold ::convex_combination(cref_type x, cref_type y, double t, ref_type result){\n value_type l;\n log(x, y, l);\n exp(x, l * t, result);\n}\n\n// Karcher mean implementations\ntemplate \ninline void Manifold::karcher_mean(ref_type x, const value_list& v, double tol, int maxit){\n value_type L, temp;\n \n int k = 0;\n double error = 0.0;\n do{\n\tscalar_type m1 = x.sum();\n\tL = value_type::Zero();\n\tfor(int i = 0; i < v.size(); ++i){\n\t log(x, v[i], temp);\n\t L += temp;\n\t}\n\texp(x, 0.5 / v.size() * (L + L.transpose()), temp);\n\tx = temp;\n\terror = std::abs(x.sum() - m1);\n\t++k;\n } while(error > tol && k < maxit);\n\n}\n\ntemplate \ninline void Manifold::weighted_karcher_mean(ref_type x, const weight_list& w, const value_list& v, double tol, int maxit){\n value_type L, temp;\n \n int k = 0;\n double error = 0.0;\n do{\n\tscalar_type m1 = x.sum();\n\tL = value_type::Zero();\n\tfor(int i = 
0; i < v.size(); ++i){\n\t log(x, v[i], temp);\n\t L += w[i] * temp;\n\t}\n\texp(x, 0.5 / v.size() * (L + L.transpose()), temp);\n\tx = temp;\n\terror = std::abs(x.sum() - m1);\n\t++k;\n } while(error > tol && k < maxit);\n\n}\n\ntemplate \ntemplate \ninline void Manifold::karcher_mean(V& x, const Args&... args){\n V temp, sum;\n \n int numArgs = sizeof...(args);\n int k = 0;\n double error = 0.0; \n double tol = 1e-10;\n int maxit = 15;\n do{\n\tscalar_type m1 = x.sum();\n\tsum = x;\n\tvariadic_karcher_mean_gradient(sum, args...);\n\texp(x, 0.5 / numArgs * (sum + sum.transpose()), temp);\n\tx = temp;\n\terror = std::abs(x.sum() - m1);\n\t++k;\n } while(error > tol && k < maxit);\n}\n\ntemplate \ntemplate \ninline void Manifold::variadic_karcher_mean_gradient(V& x, const V& y){\n V temp;\n log(x, y, temp);\n x = temp;\n}\n\ntemplate \ntemplate \ninline void Manifold::variadic_karcher_mean_gradient(V& x, const V& y1, const Args& ... args){\n V temp1, temp2;\n temp2 = x;\n \n log(x, y1, temp1);\n\n variadic_karcher_mean_gradient(temp2, args...);\n temp1 += temp2;\n x = temp1;\n}\n\n\ntemplate \ninline void Manifold::interpolation_preprocessing(ref_type x){\n value_type t = x.log();\n x = t;\n}\n\ntemplate \ninline void Manifold::interpolation_postprocessing(ref_type x){\n value_type t = ( 0.5 * (x + x.transpose()) ).exp();\n x = t;\n}\n\n\n} // end namespace tvmtl\n\n\n\n\n\n\n\n\n#endif\n", "meta": {"hexsha": "8c9bebc1cc33ba368dd51922934834b95e9c6669", "size": 10996, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "mtvmtl/core/manifold_spd.hpp", "max_stars_repo_name": "pdebus/MTVMTL", "max_stars_repo_head_hexsha": "65a7754b34d1f6a1e86d15e3c2d4346b9418414f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2017-05-08T12:40:46.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-02T05:11:01.000Z", "max_issues_repo_path": "mtvmtl/core/manifold_spd.hpp", "max_issues_repo_name": "pdebus/MTVMTL", 
"max_issues_repo_head_hexsha": "65a7754b34d1f6a1e86d15e3c2d4346b9418414f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mtvmtl/core/manifold_spd.hpp", "max_forks_repo_name": "pdebus/MTVMTL", "max_forks_repo_head_hexsha": "65a7754b34d1f6a1e86d15e3c2d4346b9418414f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2087912088, "max_line_length": 148, "alphanum_fraction": 0.6777009822, "num_tokens": 3273, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8152324803738429, "lm_q2_score": 0.611381973294151, "lm_q1q2_score": 0.4984184425444453}} {"text": "#include \n#include \n#include \n#include \n\nstruct PoseData {\n Eigen::Vector3d p;\n Eigen::Quaterniond q;\n std::string filename;\n int index;\n};\n\ninline Eigen::Vector3d logmap(const Eigen::Quaterniond &q) {\n Eigen::AngleAxisd aa(q);\n return aa.angle() * aa.axis();\n}\n\ninline std::vector read_7scenes_pose(const std::string &path, int pose_num, const std::string &pose_type) {\n std::vector poses;\n // printf(\"#idx px py pz qx qy qz qw\\n\");\n for (int idx = 0; idx < pose_num; idx++) {\n char filename[1024];\n if (pose_type == \"gt\") {\n sprintf(filename, \"%s/frame-%06d.pose.txt\", path.c_str(), idx);\n } else if (pose_type == \"test\") {\n sprintf(filename, \"%s/frame-%06d.txt\", path.c_str(), idx);\n } else {\n std::runtime_error(\"Unknown pose_type.\");\n }\n std::ifstream in(filename);\n Eigen::Matrix4d e;\n for (int r = 0; r < 4; r++) {\n for (int c = 0; c < 4; c++) {\n in >> e(r, c);\n }\n }\n // origin: Twc\n Eigen::Quaterniond qwc(e.block<3, 3>(0, 0));\n Eigen::Vector3d pwc(e.block<3, 1>(0, 3));\n if (std::abs(qwc.norm() - 1.0) > 1.0e-3) {\n fprintf(stderr, \"Warning, rotation matrix may not be valid, 
qwc.norm() %.4e\", qwc.norm());\n }\n if (pose_type == \"gt\") {\n pwc.x() += 0.0245;\n } else if (pose_type == \"test\") {\n pwc = -(qwc.conjugate() * pwc);\n qwc = qwc.conjugate();\n } else {\n std::runtime_error(\"Unknown pose_type.\");\n }\n PoseData pose;\n pose.q = qwc;\n pose.p = pwc;\n pose.filename = filename;\n pose.index = idx;\n poses.emplace_back(std::move(pose));\n // printf(\"%05d %.9e %.9e %.9e %.9e %.9e %.9e %.9e\\n\", idx, pwc.x(), pwc.y(), pwc.z(), qwc.x(), qwc.y(), qwc.z(), qwc.w());\n }\n return poses;\n}\n\nint main(int argc, char **argv) {\n std::string gt_dir = argv[1];\n std::string pred_dir = argv[2];\n const int pose_num = 1000;\n auto gt_poses = read_7scenes_pose(gt_dir, pose_num, \"gt\");\n auto pred_poses = read_7scenes_pose(pred_dir, pose_num, \"test\");\n assert(pred_poses.size() == gt_poses.size());\n double APE = 0, ARE = 0, Acount = 0;\n std::vector APEs, AREs;\n for (int i = 0; i < pose_num; ++i) {\n const auto >_pose = gt_poses[i];\n const auto &pred_pose = pred_poses[i];\n Eigen::Vector3d p_error = gt_pose.p - pred_pose.p;\n Eigen::Vector3d q_error = logmap(gt_pose.q.conjugate() * pred_pose.q);\n APE += p_error.squaredNorm();\n ARE += q_error.squaredNorm();\n APEs.push_back(p_error.norm() * 1e3);\n AREs.push_back(q_error.norm() * 180 / M_PI);\n Acount++;\n }\n Acount = std::max(Acount, 1.0);\n APE = std::sqrt(APE / Acount) * 1e3;\n ARE = std::sqrt(ARE / Acount) * 180 / M_PI;\n printf(\"APE RMSE: %.3f [mm]\\nARE RMSE %.3f[DEG]\\n\", APE, ARE);\n std::sort(APEs.begin(), APEs.end());\n std::sort(AREs.begin(), AREs.end());\n printf(\"APE median: %.3f [mm]\\nARE median %.3f[DEG]\\n\", APEs[APEs.size() / 2], AREs[AREs.size() / 2]);\n return 0;\n}\n", "meta": {"hexsha": "7dee6e2b75d3b1b632c5f36d0e25a8d15604f4dd", "size": 3241, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/exec/evaluate_pose.cc", "max_stars_repo_name": "asdiuzd/lass", "max_stars_repo_head_hexsha": "a767f8bd68c46dadf8d74703fdf2058da9f17e53", 
"max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-01-06T09:04:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T09:04:46.000Z", "max_issues_repo_path": "src/exec/evaluate_pose.cc", "max_issues_repo_name": "asdiuzd/lass", "max_issues_repo_head_hexsha": "a767f8bd68c46dadf8d74703fdf2058da9f17e53", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/exec/evaluate_pose.cc", "max_forks_repo_name": "asdiuzd/lass", "max_forks_repo_head_hexsha": "a767f8bd68c46dadf8d74703fdf2058da9f17e53", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6153846154, "max_line_length": 131, "alphanum_fraction": 0.5427337242, "num_tokens": 1017, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8006920116079209, "lm_q2_score": 0.6224593312018546, "lm_q1q2_score": 0.49839821404413404}} {"text": "#ifndef __PC_TO_SURFACES_HH__\n#define __PC_TO_SURFACES_HH__\n\n#include \n#include \n\n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n\n\n#include \n\n#include \n#include \n\n#include \n\nstruct PC2SurfacesParams{\n public:\n PC2SurfacesParams();\n void print();\n\n int max_inner_iter;\n int max_outer_iter;\n int outlier_id;\n int unclassified_id;\n int object_id;\n double term_perc_crit;\n double normal_dev_thres1; // degrees\n double normal_dev_thres2; \n double contour_fit_thres; // ratio to radius\n double sphere_r; // meters\n double segment_len; // meters\n double normal_search_radius;\n int num_segments;\n string contour_type;\n double voxel_leaf_size;\n double curvature_thres;\n double var_r;\n double var_azimutal;\n double var_elevation;\n double max_segment_dist;\n double max_segment_rot;\n};\n\nclass PC2Surfaces{\n private:\n PC2SurfacesParams _params;\n\n // ------------ BOOKKEEPING --------------------------------------------------- //\n // Segment ID of each point in '_pc_sphere'\n vector _segment_ids;\n vector _valid_segs;\n // Outlier flags for points in '_pc_sphere'\n vector _outliers;\n // Mapping between segments and their corresponding \n // coordinate triad, origin and contour equations.\n std::map _segment_triad_map;\n std::map _segment_origin_map;\n std::map _segment_contour_map;\n\n // ------------ AXIS, CONTOUR & UNCERTAINTY ESTIMATION ------------------------ //\n std::map _segment_Mmatrix;\n // Mapping between segments and eigenpairs of the 'M' matrix as \n // stored in '_segment_Mmatrix'.\n std::map > _axis_eigenpairs;\n // Mapping between segments and index of the smallest eigen value\n // of the corresponding eigenpair.\n std::map _axis_min_eigval_ind;\n // Mapping between segments and their corresponding axes's \n // 
uncertainties.\n std::map _axis_uncertainties;\n // Mapping between segments and the intermediate matrix-vector pairs\n // calculated while estimating the contour the corresponding segment.\n // This is a function of the form Ax=b.\n std::map > _segment_contour_equ;\n // Mapping between segments and the derivative of the corresponding\n // contour w.r.t. 'x' the solution to Ax=b explained above.\n std::map _segment_dcontour;\n // Mapping between the segments and the corresponding contour \n // uncertainties.\n std::map _contour_uncertainties;\n\n // ------------ POINTCLOUDS --------------------------------------------------- //\n // The original unfiltered point cloud.\n pcl::PointCloud::Ptr _pc_orig;\n // '_pc_orig' after filtered with voxel grid filter and radius filter.\n pcl::PointCloud::Ptr _pc_sphere;\n pcl::PointCloud::Ptr _pc_outliers;\n pcl::PointCloud::Ptr _pc_objects;\n // Normals of the points in '_pc_sphere'.\n pcl::PointCloud::Ptr _pc_sphere_normals;\n // '_pc_sphere' written in the corresponding segment frame.\n vector _pc_projections; \n\n // ------------ NORMAL & UNCERTAINTY ESTIMATION ------------------------------- //\n // Some intermediate variables used in estimating surface normals.\n vector > _nearest_neigh_inds;\n vector > _nearest_neigh_sq_dists;\n // Covariance and centroids for each point in '_pc_sphere' \n vector _normal_covs;\n vector _normal_centroids;\n // Eigenpairs of each '_normal_covs'\n vector > _normal_eigenpairs;\n // Index of the smallest eigenvalue of the eigenpairs in '_normal_eigenpairs'\n vector _normal_min_eigval_ind;\n // Uncertainties of each normals vector\n vector _normal_uncertainties;\n // Uncertainties each point in '_pc_sphere' written in the sensor frame.\n vector _point_uncertainties;\n\n // PCL's visualization toolbox\n pcl::visualization::PCLVisualizer::Ptr _viewer;\n\n // ---------------------------------------------------------------------------- //\n // This fucntion estimates surface normals for each point 
in '_pc_sphere' as well\n // as it estimates each normal's uncertainty.\n int _fit_normals();\n // This function initializes/refines the coordinate frame and the origin of the \n // segment 'seg'. It also generates intermediate variables for later axis \n // uncertainty estimation.\n int _init_triad(int seg);\n // This function iteratively filters for a segment, fits contour and eliminates\n // outliers until convergence.\n int _fit_segment(int seg);\n // This function assigns points satisfying the criterion given in '_params' to 'seg'.\n int _filter_segment(int seg);\n // This function eliminates outliers of segment 'seg' with either using\n // the 'normal' or 'contour' method.\n int _eliminate_outliers(int seg, const std::string &method);\n // This function transforms points of the segment 'seg' from the sensor\n // frame to the corresponding segment frame.\n int _project_pc(int seg);\n // This function fits a contour to the projected points of the segment 'seg'\n // using the method given in '_params'.\n int _fit_contour(int seg);\n // This function estimates axis and contour uncertainties of the segment 'seg'\n int _estimate_uncertainties(int seg);\n public:\n\n PC2Surfaces();\n PC2Surfaces(const PC2SurfacesParams ¶ms);\n\n int push_pc(const pcl::PointCloud::Ptr &pc);\n\n inline int set_params(const PC2SurfacesParams ¶ms){ \n _params = params; \n return 0;\n }\n\n inline int get_params(PC2SurfacesParams ¶ms){ \n params = _params; \n return 0;\n }\n\n inline int get_segment_ids(vector &ids, vector &outliers){\n ids = _segment_ids;\n outliers = _outliers;\n return _segment_ids.size();\n }\n\n inline int get_segments( \n map &segment_origins, \n map &segment_triads, \n map &segment_contours){\n segment_origins.clear();\n segment_triads.clear();\n segment_contours.clear();\n for(int i = 0 ; i < (int)_valid_segs.size() ; i++){\n int seg = _valid_segs[i];\n segment_origins[seg] = _segment_origin_map[seg];\n segment_triads[seg] = _segment_triad_map[seg];\n 
segment_contours[seg] = _segment_contour_map[seg];\n }\n return segment_triads.size();\n }\n\n inline int get_orig_pc(pcl::PointCloud::Ptr &pc){\n if(_pc_orig){\n pc = pcl::PointCloud::Ptr(new pcl::PointCloud(*_pc_orig));\n return pc->points.size();\n } else {\n pc = NULL;\n return 0;\n }\n }\n\n inline int get_pc_sphere(pcl::PointCloud::Ptr &pc){\n if(_pc_sphere){\n pc = pcl::PointCloud::Ptr(new pcl::PointCloud(*_pc_sphere));\n return pc->points.size();\n } else {\n pc = NULL;\n return 0;\n }\n }\n\n inline int get_pc_outliers(pcl::PointCloud::Ptr &pc){\n if(_pc_outliers){\n pc = pcl::PointCloud::Ptr(new pcl::PointCloud(*_pc_outliers));\n return pc->points.size();\n } else {\n pc = NULL;\n return 0;\n }\n }\n\n inline int get_pc_objects(pcl::PointCloud::Ptr &pc){\n if(_pc_objects){\n pc = pcl::PointCloud::Ptr(new pcl::PointCloud(*_pc_objects));\n return pc->points.size();\n } else {\n pc = NULL;\n return 0;\n }\n }\n\n\n\n inline int get_normals(pcl::PointCloud::Ptr &normals){\n if(_pc_sphere_normals){\n normals = pcl::PointCloud::Ptr(new pcl::PointCloud(*_pc_sphere_normals));\n return normals->points.size();\n } else {\n normals = NULL;\n return 0;\n }\n }\n\n inline int get_uncertainties(\n map &axis_uncertainties, \n map &contour_uncertainties){\n axis_uncertainties = _axis_uncertainties;\n contour_uncertainties = _contour_uncertainties;\n return axis_uncertainties.size();\n }\n\n inline int get_point_uncertainties(vector &covs){\n covs = _point_uncertainties;\n return covs.size();\n }\n\n inline int get_normal_uncertainties(vector &covs){\n covs = _normal_uncertainties;\n return covs.size();\n }\n\n int get_projections(int seg, vector &proj, vector &outliers);\n int get_segment_pc(int seg, pcl::PointCloud &pc);\n\n int visualize_fit();\n};\n\n\n\n#endif\n", "meta": {"hexsha": "dc4addaabdc5dc0789d347c63a4b37c17f95ee01", "size": 9699, "ext": "hh", "lang": "C++", "max_stars_repo_path": "tunnel_estimator/include/tunnel_estimator/pc_to_surfaces.hh", 
"max_stars_repo_name": "ozaslan/estimators", "max_stars_repo_head_hexsha": "ad78f2d395d4a6155f0b6d61541167a99959a1c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tunnel_estimator/include/tunnel_estimator/pc_to_surfaces.hh", "max_issues_repo_name": "ozaslan/estimators", "max_issues_repo_head_hexsha": "ad78f2d395d4a6155f0b6d61541167a99959a1c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tunnel_estimator/include/tunnel_estimator/pc_to_surfaces.hh", "max_forks_repo_name": "ozaslan/estimators", "max_forks_repo_head_hexsha": "ad78f2d395d4a6155f0b6d61541167a99959a1c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0557620818, "max_line_length": 207, "alphanum_fraction": 0.6505825343, "num_tokens": 2318, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8006920116079209, "lm_q2_score": 0.6224593312018545, "lm_q1q2_score": 0.498398214044134}} {"text": "/*************************************************************************\n\t> File Name: lqr_steer_control.cpp\n\t> Author: TAI Lei\n\t> Mail: ltai@ust.hk\n\t> Created Time: Wed Apr 17 11:48:46 2019\n ************************************************************************/\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"cubic_spline.h\"\n#include \"motion_model.h\"\n#include \"cpprobotics_types.h\"\n\n#define DT 0.1\n#define L 0.5\n#define KP 1.0\n#define MAX_STEER 45.0/180*M_PI\n\nusing namespace cpprobotics;\n\ncv::Point2i cv_offset(\n float x, float y, int image_width=2000, int image_height=2000){\n cv::Point2i output;\n output.x = int(x * 100) + 300;\n output.y = image_height - int(y * 100) - image_height/2;\n return output;\n};\n\nVec_f calc_speed_profile(Vec_f rx, Vec_f ry, Vec_f ryaw, float target_speed){\n\tVec_f speed_profile(ryaw.size(), target_speed);\n\n\tfloat direction = 1.0;\n\tfor(unsigned int i=0; i < ryaw.size()-1; i++){\n\t\tfloat dyaw = std::abs(ryaw[i+1] - ryaw[i]);\n\t\tfloat switch_point = (M_PI/4.0< dyaw) && (dyaw::max();\n\tfor(unsigned int i=0; i= MAX_STEER) delta = MAX_STEER;\n if (delta <= - MAX_STEER) delta = - MAX_STEER;\n\n state.x = state.x + state.v * std::cos(state.yaw) * DT;\n state.y = state.y + state.v * std::sin(state.yaw) * DT;\n state.yaw = state.yaw + state.v / L * std::tan(delta) * DT;\n state.v = state.v + a * DT;\n\n};\n\nvoid closed_loop_prediction(Vec_f cx, Vec_f cy, Vec_f cyaw, Vec_f ck, Vec_f speed_profile, Poi_f goal){\n\tfloat T = 500.0;\n\tfloat goal_dis = 0.5;\n\tfloat stop_speed = 0.05;\n\n\tState state(-0.0, -0.0, 0.0, 0.0);\n\n\tfloat time_ = 0.0;\n\tVec_f x;\n\tx.push_back(state.x);\n\tVec_f y;\n\ty.push_back(state.y);\n\tVec_f yaw;\n\tyaw.push_back(state.yaw);\n\tVec_f v;\n\tv.push_back(state.v);\n\tVec_f 
t;\n\tt.push_back(0.0);\n\n\tfloat e = 0;\n\tfloat e_th = 0;\n\tint ind = 0;\n\n\n cv::namedWindow(\"lqr\", cv::WINDOW_NORMAL);\n int count = 0;\n\n\n\tcv::Mat bg(2000, 2000, CV_8UC3, cv::Scalar(255, 255, 255));\n\tfor(unsigned int i=1; i= time_){\n\t\tfloat di = lqr_steering_control(state, cx, cy, cyaw, ck, ind, e, e_th);\n\t\tfloat ai = KP * (speed_profile[ind]-state.v);\n\t update(state, ai, di);\n\n\t\tif (std::abs(state.v) <= stop_speed) ind += 1;\n\n\t\tfloat dx = state.x - goal[0];\n\t\tfloat dy = state.y - goal[1];\n\t\tif (std::sqrt(dx*dx + dy*dy) <= goal_dis) {\n\t\t\tstd::cout<<(\"Goal\")< point_ = csp_obj.calc_postion(i);\n\t\tr_x.push_back(point_[0]);\n\t\tr_y.push_back(point_[1]);\n\t\tryaw.push_back(csp_obj.calc_yaw(i));\n\t\trcurvature.push_back(csp_obj.calc_curvature(i));\n\t\trs.push_back(i);\n\t}\n\tfloat target_speed = 10.0 / 3.6;\n\tVec_f speed_profile = calc_speed_profile(r_x, r_y, ryaw, target_speed);\n\tclosed_loop_prediction(r_x, r_y, ryaw, rcurvature, speed_profile, {{wx.back(), wy.back()}});\n\n}", "meta": {"hexsha": "8b1f1632d1854e604784d12c9807f19dd47c46f3", "size": 6027, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/lqr_steer_control.cpp", "max_stars_repo_name": "Singh-sid930/CppRobotics", "max_stars_repo_head_hexsha": "0e4ced2cf1c927156cd3745dee2b2e7250ce95d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-06-27T07:09:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-17T07:54:34.000Z", "max_issues_repo_path": "src/lqr_steer_control.cpp", "max_issues_repo_name": "sweetquiet/CppRobotics", "max_issues_repo_head_hexsha": "c5a8cc9a958ee64ab80b9726dc70a3c11f499bd0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lqr_steer_control.cpp", "max_forks_repo_name": "sweetquiet/CppRobotics", "max_forks_repo_head_hexsha": 
"c5a8cc9a958ee64ab80b9726dc70a3c11f499bd0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-03-11T13:53:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-11T13:53:59.000Z", "avg_line_length": 25.3235294118, "max_line_length": 117, "alphanum_fraction": 0.612742658, "num_tokens": 2148, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8006920020959544, "lm_q2_score": 0.6224593312018546, "lm_q1q2_score": 0.4983982081233217}} {"text": "#define PY_ARRAY_UNIQUE_SYMBOL phist_PyArray_API\n#define NO_IMPORT_ARRAY\n\n#include \n#include \n\n#include \n\n\n#include \n\n\n\n#include \n#include \n\n#include \"seglib/histogram/histogram.hxx\"\n#include \"seglib/histogram/histogram_python.hxx\"\n\n\nnamespace python = boost::python;\n\nnamespace histogram {\n\n\n\n\n vigra::NumpyAnyArray jointColorHistogram(\n\n vigra::NumpyArray<2, vigra::TinyVector > img,\n const vigra::TinyVector & min,\n const vigra::TinyVector & max,\n const vigra::TinyVector & bins,\n const size_t r,\n //output\n vigra::NumpyArray<5, float > res = vigra::NumpyArray<5, float >()\n\n\n ){\n\n // allocate output\n typedef typename vigra::NumpyArray<5, float >::difference_type Shape5;\n Shape5 shape(img.shape(0),img.shape(1),int(bins[0]),int(bins[1]),int(bins[2]));\n res.reshapeIfEmpty(shape);\n std::fill(res.begin(),res.end(),0.0);\n // \n Shape5 histCoord;\n const vigra::TinyVector ones(1,1,1);\n const vigra::TinyVector radius(r,r);\n const vigra::TinyVector radius1(r+1,r+1);\n const vigra::TinyVector fac = ( (bins-ones) / (max-min) );\n\n\n vigra::TinyVector start,end,c;\n\n for(histCoord[0]=0;histCoord[0] pixelValue = img(c[0],c[1]);\n pixelValue -= min;\n pixelValue *= fac;\n\n // (the first two coordinates of hist coord are filled)\n histCoord[2]=int(pixelValue[0]);\n histCoord[3]=int(pixelValue[1]);\n histCoord[4]=int(pixelValue[2]);\n\n\n PHIST_ASSERT_OP(histCoord[0],<,img.shape(0));\n 
PHIST_ASSERT_OP(histCoord[1],<,img.shape(1));\n\n PHIST_ASSERT_OP(histCoord[2],<,bins[0]);\n PHIST_ASSERT_OP(histCoord[3],<,bins[1]);\n PHIST_ASSERT_OP(histCoord[4],<,bins[2]);\n\n // increment counter\n res(histCoord[0],histCoord[1],histCoord[2],histCoord[3],histCoord[4])+=1.0;\n } \n } \n\n // normalizes\n for(histCoord[0]=0;histCoord[0] > img,\n vigra::NumpyArray<1, float > min,\n vigra::NumpyArray<1, float > max,\n const size_t bins,\n const size_t r,\n //output\n vigra::NumpyArray<4, float > res = vigra::NumpyArray<4, float >()\n ){ \n const size_t nChannels=img.shape(2);\n // allocate output\n typedef typename vigra::NumpyArray<4, float >::difference_type Shape4;\n Shape4 shape(img.shape(0),img.shape(1),nChannels,bins);\n res.reshapeIfEmpty(shape);\n std::fill(res.begin(),res.end(),0.0);\n\n\n // coordinate in the res array (pixel wise histogram)\n // (x,y,c,bin)\n Shape4 histCoord;\n const vigra::TinyVector radius1(r+1,r+1);\n // channel wise factor\n std::vector fac(nChannels);\n for(size_t channel=0;channel start,end,c;\n\n for(histCoord[0]=0;histCoord[0]((value - min(histCoord[2]) )*fac[histCoord[2]]);\n\n /*\n std::cout<<\"\\n\\n\u201d channel \"< > img,\n // input labeling for which are used as mask images\n vigra::NumpyArray<3, vigra::Multiband > labelings,\n vigra::NumpyArray<1, UInt64> numberOfLabels,\n vigra::NumpyArray<1, float> weights\n ){\n\n const size_t numberOfInputLabelings = labelings.shape(2);\n \n }\n */\n\n\n void export_histogram(){\n\n python::def(\"_jointColorHistogram_\",vigra::registerConverters(&jointColorHistogram),\n (\n python::arg(\"img\"),\n python::arg(\"dmin\"),\n python::arg(\"dmax\"),\n python::arg(\"bins\"),\n python::arg(\"r\"),\n python::arg(\"out\")=python::object()\n )\n );\n\n python::def(\"_batchHistogram_\",vigra::registerConverters(&batchHistogram),\n (\n python::arg(\"img\"),\n python::arg(\"dmin\"),\n python::arg(\"dmax\"),\n python::arg(\"bins\"),\n python::arg(\"r\"),\n python::arg(\"out\")=python::object()\n 
)\n );\n\n }\n\n}", "meta": {"hexsha": "7bb18c1d689668ebf2d99fa8e94f04d6fb3a3297", "size": 8237, "ext": "cxx", "lang": "C++", "max_stars_repo_path": "src/python/histogram/py_histogram.cxx", "max_stars_repo_name": "DerThorsten/seglib", "max_stars_repo_head_hexsha": "4655079e390e301dd93e53f5beed6c9737d6df9f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/python/histogram/py_histogram.cxx", "max_issues_repo_name": "DerThorsten/seglib", "max_issues_repo_head_hexsha": "4655079e390e301dd93e53f5beed6c9737d6df9f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/python/histogram/py_histogram.cxx", "max_forks_repo_name": "DerThorsten/seglib", "max_forks_repo_head_hexsha": "4655079e390e301dd93e53f5beed6c9737d6df9f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0803212851, "max_line_length": 100, "alphanum_fraction": 0.5108656064, "num_tokens": 2252, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8006920020959544, "lm_q2_score": 0.6224593312018546, "lm_q1q2_score": 0.4983982081233217}} {"text": "#include \n", "meta": {"hexsha": "b8c3677a8213310c6f0eaf93a348db60157c2604", "size": 48, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_random_cauchy_distribution.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_random_cauchy_distribution.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_random_cauchy_distribution.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 24.0, "max_line_length": 47, "alphanum_fraction": 0.8333333333, "num_tokens": 10, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8006919830720203, "lm_q2_score": 0.6224593312018546, "lm_q1q2_score": 0.4983981962816964}} {"text": "/*\n * Copyright Nick Thompson, 2020\n * Use, modification and distribution are subject to the\n * Boost Software License, Version 1.0. 
(See accompanying file\n * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n */\n\n#include \"math_unit_test.hpp\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#ifdef BOOST_HAS_FLOAT128\n#include \nusing boost::multiprecision::float128;\n#endif\n\n\nusing boost::math::interpolators::septic_hermite;\nusing boost::math::interpolators::cardinal_septic_hermite;\nusing boost::math::interpolators::cardinal_septic_hermite_aos;\n\ntemplate\nvoid test_constant()\n{\n\n std::vector x{0,1,2,3, 9, 22, 81};\n std::vector y(x.size());\n std::vector dydx(x.size(), 0);\n std::vector d2ydx2(x.size(), 0);\n std::vector d3ydx3(x.size(), 0);\n for (auto & t : y)\n {\n t = 7;\n }\n\n auto sh = septic_hermite(std::move(x), std::move(y), std::move(dydx), std::move(d2ydx2), std::move(d3ydx3));\n\n for (Real t = 0; t <= 81; t += 0.25)\n {\n CHECK_ULP_CLOSE(Real(7), sh(t), 24);\n CHECK_ULP_CLOSE(Real(0), sh.prime(t), 24);\n }\n\n Real x0 = 0;\n Real dx = 1;\n y.resize(128, 7);\n dydx.resize(128, 0);\n d2ydx2.resize(128, 0);\n d3ydx3.resize(128, 0);\n auto csh = cardinal_septic_hermite(std::move(y), std::move(dydx), std::move(d2ydx2), std::move(d3ydx3), x0, dx);\n for (Real t = x0; t <= 127; t += 0.25)\n {\n CHECK_ULP_CLOSE(Real(7), csh(t), 24);\n CHECK_ULP_CLOSE(Real(0), csh.prime(t), 24);\n CHECK_ULP_CLOSE(Real(0), csh.double_prime(t), 24);\n }\n\n std::vector> data(128);\n for (size_t i = 0; i < data.size(); ++i)\n {\n data[i][0] = 7;\n data[i][1] = 0;\n data[i][2] = 0;\n data[i][3] = 0;\n }\n auto csh_aos = cardinal_septic_hermite_aos(std::move(data), x0, dx);\n for (Real t = x0; t <= 127; t += 0.25)\n {\n CHECK_ULP_CLOSE(Real(7), csh_aos(t), 24);\n CHECK_ULP_CLOSE(Real(0), csh_aos.prime(t), 24);\n CHECK_ULP_CLOSE(Real(0), csh_aos.double_prime(t), 24);\n }\n\n // Now check the boundaries:\n auto [tlo, thi] = csh.domain();\n int samples = 5000;\n int i = 0;\n while (i++ < samples)\n {\n CHECK_ULP_CLOSE(Real(7), csh(tlo), 2);\n 
CHECK_ULP_CLOSE(Real(7), csh(thi), 2);\n CHECK_ULP_CLOSE(Real(7), csh_aos(tlo), 2);\n CHECK_ULP_CLOSE(Real(7), csh_aos(thi), 2);\n CHECK_ULP_CLOSE(Real(0), csh.prime(tlo), 2);\n CHECK_ULP_CLOSE(Real(0), csh.prime(thi), 2);\n CHECK_ULP_CLOSE(Real(0), csh_aos.prime(tlo), 2);\n CHECK_ULP_CLOSE(Real(0), csh_aos.prime(thi), 2);\n CHECK_ULP_CLOSE(Real(0), csh.double_prime(tlo), 2);\n CHECK_ULP_CLOSE(Real(0), csh.double_prime(thi), 2);\n CHECK_ULP_CLOSE(Real(0), csh_aos.double_prime(tlo), 2);\n CHECK_ULP_CLOSE(Real(0), csh_aos.double_prime(thi), 2);\n\n tlo = boost::math::nextafter(tlo, std::numeric_limits::max());\n thi = boost::math::nextafter(thi, std::numeric_limits::lowest());\n }\n\n}\n\n\ntemplate\nvoid test_linear()\n{\n std::vector x{0,1,2,3,4,5,6,7,8,9};\n std::vector y = x;\n std::vector dydx(x.size(), 1);\n std::vector d2ydx2(x.size(), 0);\n std::vector d3ydx3(x.size(), 0);\n\n auto sh = septic_hermite(std::move(x), std::move(y), std::move(dydx), std::move(d2ydx2), std::move(d3ydx3));\n\n for (Real t = 0; t <= 9; t += 0.25)\n {\n CHECK_ULP_CLOSE(Real(t), sh(t), 2);\n CHECK_ULP_CLOSE(Real(1), sh.prime(t), 2);\n }\n\n boost::random::mt19937 rng;\n boost::random::uniform_real_distribution dis(0.5,1);\n x.resize(512);\n x[0] = dis(rng);\n Real xmin = x[0];\n for (size_t i = 1; i < x.size(); ++i)\n {\n x[i] = x[i-1] + dis(rng);\n }\n Real xmax = x.back();\n\n y = x;\n dydx.resize(x.size(), 1);\n d2ydx2.resize(x.size(), 0);\n d3ydx3.resize(x.size(), 0);\n\n sh = septic_hermite(std::move(x), std::move(y), std::move(dydx), std::move(d2ydx2), std::move(d3ydx3));\n\n for (Real t = xmin; t <= xmax; t += 0.125)\n {\n CHECK_ULP_CLOSE(t, sh(t), 25);\n CHECK_ULP_CLOSE(Real(1), sh.prime(t), 850);\n }\n\n Real x0 = 0;\n Real dx = 1;\n y.resize(10);\n dydx.resize(10, 1);\n d2ydx2.resize(10, 0);\n d3ydx3.resize(10, 0);\n for (size_t i = 0; i < y.size(); ++i)\n {\n y[i] = i;\n }\n auto csh = cardinal_septic_hermite(std::move(y), std::move(dydx), std::move(d2ydx2), 
std::move(d3ydx3), x0, dx);\n for (Real t = 0; t <= 9; t += 0.125)\n {\n CHECK_ULP_CLOSE(t, csh(t), 15);\n CHECK_ULP_CLOSE(Real(1), csh.prime(t), 15);\n CHECK_ULP_CLOSE(Real(0), csh.double_prime(t), 15);\n }\n\n std::vector> data(10);\n for (size_t i = 0; i < data.size(); ++i)\n {\n data[i][0] = i;\n data[i][1] = 1;\n data[i][2] = 0;\n data[i][3] = 0;\n }\n auto csh_aos = cardinal_septic_hermite_aos(std::move(data), x0, dx);\n for (Real t = 0; t <= 9; t += 0.125)\n {\n CHECK_ULP_CLOSE(t, csh_aos(t), 15);\n CHECK_ULP_CLOSE(Real(1), csh_aos.prime(t), 15);\n CHECK_ULP_CLOSE(Real(0), csh_aos.double_prime(t), 15);\n }\n\n // Now check the boundaries:\n auto [tlo, thi] = csh.domain();\n int samples = 5000;\n int i = 0;\n while (i++ < samples)\n {\n CHECK_ULP_CLOSE(Real(tlo), csh(tlo), 2);\n CHECK_ULP_CLOSE(Real(thi), csh(thi), 8);\n CHECK_ULP_CLOSE(Real(tlo), csh_aos(tlo), 2);\n CHECK_ULP_CLOSE(Real(thi), csh_aos(thi), 8);\n CHECK_ULP_CLOSE(Real(1), csh.prime(tlo), 2);\n CHECK_ULP_CLOSE(Real(1), csh.prime(thi), 700);\n CHECK_ULP_CLOSE(Real(1), csh_aos.prime(tlo), 2);\n CHECK_ULP_CLOSE(Real(1), csh_aos.prime(thi), 700);\n CHECK_MOLLIFIED_CLOSE(Real(0), csh.double_prime(tlo), std::numeric_limits::epsilon());\n CHECK_MOLLIFIED_CLOSE(Real(0), csh.double_prime(thi), 1200*std::numeric_limits::epsilon());\n CHECK_MOLLIFIED_CLOSE(Real(0), csh_aos.double_prime(tlo), std::numeric_limits::epsilon());\n CHECK_MOLLIFIED_CLOSE(Real(0), csh_aos.double_prime(thi), 1200*std::numeric_limits::epsilon());\n\n tlo = boost::math::nextafter(tlo, std::numeric_limits::max());\n thi = boost::math::nextafter(thi, std::numeric_limits::lowest());\n }\n\n}\n\ntemplate\nvoid test_quadratic()\n{\n std::vector x{0,1,2,3,4,5,6,7,8,9};\n std::vector y(x.size());\n for (size_t i = 0; i < y.size(); ++i)\n {\n y[i] = x[i]*x[i]/2;\n }\n\n std::vector dydx(x.size());\n for (size_t i = 0; i < y.size(); ++i)\n {\n dydx[i] = x[i];\n }\n\n std::vector d2ydx2(x.size(), 1);\n std::vector d3ydx3(x.size(), 0);\n\n 
auto sh = septic_hermite(std::move(x), std::move(y), std::move(dydx), std::move(d2ydx2), std::move(d3ydx3));\n\n for (Real t = 0; t <= 9; t += 0.0078125)\n {\n CHECK_ULP_CLOSE(t*t/2, sh(t), 100);\n CHECK_ULP_CLOSE(t, sh.prime(t), 32);\n }\n\n boost::random::mt19937 rng;\n boost::random::uniform_real_distribution dis(0.5,1);\n x.resize(8);\n x[0] = dis(rng);\n Real xmin = x[0];\n for (size_t i = 1; i < x.size(); ++i)\n {\n x[i] = x[i-1] + dis(rng);\n }\n Real xmax = x.back();\n\n y.resize(x.size());\n for (size_t i = 0; i < y.size(); ++i)\n {\n y[i] = x[i]*x[i]/2;\n }\n\n dydx.resize(x.size());\n for (size_t i = 0; i < y.size(); ++i)\n {\n dydx[i] = x[i];\n }\n\n d2ydx2.resize(x.size(), 1);\n d3ydx3.resize(x.size(), 0); \n\n sh = septic_hermite(std::move(x), std::move(y), std::move(dydx), std::move(d2ydx2), std::move(d3ydx3));\n\n for (Real t = xmin; t <= xmax; t += 0.125)\n {\n CHECK_ULP_CLOSE(t*t/2, sh(t), 50);\n CHECK_ULP_CLOSE(t, sh.prime(t), 300);\n }\n\n y.resize(10);\n for (size_t i = 0; i < y.size(); ++i)\n {\n y[i] = i*i/Real(2);\n }\n\n dydx.resize(y.size());\n for (size_t i = 0; i < y.size(); ++i)\n {\n dydx[i] = i;\n }\n\n d2ydx2.resize(y.size(), 1);\n d3ydx3.resize(y.size(), 0);\n\n Real x0 = 0;\n Real dx = 1;\n auto csh = cardinal_septic_hermite(std::move(y), std::move(dydx), std::move(d2ydx2), std::move(d3ydx3), x0, dx);\n for (Real t = x0; t <= 9; t += 0.125)\n {\n CHECK_ULP_CLOSE(t*t/2, csh(t), 24);\n CHECK_ULP_CLOSE(t, csh.prime(t), 24);\n CHECK_ULP_CLOSE(Real(1), csh.double_prime(t), 24);\n }\n\n std::vector> data(10);\n for (size_t i = 0; i < data.size(); ++i)\n {\n data[i][0] = i*i/Real(2);\n data[i][1] = i;\n data[i][2] = 1;\n data[i][3] = 0;\n }\n auto csh_aos = cardinal_septic_hermite_aos(std::move(data), x0, dx);\n for (Real t = x0; t <= 9; t += 0.125)\n {\n CHECK_ULP_CLOSE(t*t/2, csh_aos(t), 24);\n CHECK_ULP_CLOSE(t, csh_aos.prime(t), 24);\n CHECK_ULP_CLOSE(Real(1), csh_aos.double_prime(t), 24);\n }\n}\n\n\n\ntemplate\nvoid 
test_cubic()\n{\n\n std::vector x{0,1,2,3,4,5,6,7};\n Real xmax = x.back();\n std::vector y(x.size());\n for (size_t i = 0; i < y.size(); ++i)\n {\n y[i] = x[i]*x[i]*x[i];\n }\n\n std::vector dydx(x.size());\n for (size_t i = 0; i < y.size(); ++i)\n {\n dydx[i] = 3*x[i]*x[i];\n }\n\n std::vector d2ydx2(x.size());\n for (size_t i = 0; i < y.size(); ++i)\n {\n d2ydx2[i] = 6*x[i];\n }\n std::vector d3ydx3(x.size(), 6);\n\n auto sh = septic_hermite(std::move(x), std::move(y), std::move(dydx), std::move(d2ydx2), std::move(d3ydx3));\n\n for (Real t = 0; t <= xmax; t += 0.0078125)\n {\n CHECK_ULP_CLOSE(t*t*t, sh(t), 151);\n CHECK_ULP_CLOSE(3*t*t, sh.prime(t), 151);\n }\n\n Real x0 = 0;\n Real dx = 1;\n y.resize(8);\n dydx.resize(8);\n d2ydx2.resize(8);\n d3ydx3.resize(8,6);\n for (size_t i = 0; i < y.size(); ++i)\n {\n y[i] = i*i*i;\n dydx[i] = 3*i*i;\n d2ydx2[i] = 6*i;\n }\n\n auto csh = cardinal_septic_hermite(std::move(y), std::move(dydx), std::move(d2ydx2), std::move(d3ydx3), x0, dx);\n\n for (Real t = 0; t <= xmax; t += 0.0078125)\n {\n CHECK_ULP_CLOSE(t*t*t, csh(t), 151);\n CHECK_ULP_CLOSE(3*t*t, csh.prime(t), 151);\n CHECK_ULP_CLOSE(6*t, csh.double_prime(t), 151);\n }\n\n std::vector> data(8);\n for (size_t i = 0; i < data.size(); ++i) {\n data[i][0] = i*i*i;\n data[i][1] = 3*i*i;\n data[i][2] = 6*i;\n data[i][3] = 6;\n }\n\n auto csh_aos = cardinal_septic_hermite_aos(std::move(data), x0, dx);\n\n for (Real t = 0; t <= xmax; t += 0.0078125)\n {\n CHECK_ULP_CLOSE(t*t*t, csh_aos(t), 151);\n CHECK_ULP_CLOSE(3*t*t, csh_aos.prime(t), 151);\n CHECK_ULP_CLOSE(6*t, csh_aos.double_prime(t), 151);\n }\n}\n\ntemplate\nvoid test_quartic()\n{\n\n std::vector x{0,1,2,3,4,5,6,7,8,9};\n Real xmax = x.back();\n std::vector y(x.size());\n for (size_t i = 0; i < y.size(); ++i)\n {\n y[i] = x[i]*x[i]*x[i]*x[i];\n }\n\n std::vector dydx(x.size());\n for (size_t i = 0; i < y.size(); ++i)\n {\n dydx[i] = 4*x[i]*x[i]*x[i];\n }\n\n std::vector d2ydx2(x.size());\n for (size_t i = 0; i < 
y.size(); ++i)\n {\n d2ydx2[i] = 12*x[i]*x[i];\n }\n\n std::vector d3ydx3(x.size());\n for (size_t i = 0; i < y.size(); ++i)\n {\n d3ydx3[i] = 24*x[i];\n }\n\n auto sh = septic_hermite(std::move(x), std::move(y), std::move(dydx), std::move(d2ydx2), std::move(d3ydx3));\n\n for (Real t = 1; t <= xmax; t += 0.0078125) {\n CHECK_ULP_CLOSE(t*t*t*t, sh(t), 117);\n CHECK_ULP_CLOSE(4*t*t*t, sh.prime(t), 117);\n }\n\n y.resize(10);\n dydx.resize(10);\n d2ydx2.resize(10);\n d3ydx3.resize(10);\n for (size_t i = 0; i < y.size(); ++i)\n {\n y[i] = i*i*i*i;\n dydx[i] = 4*i*i*i;\n d2ydx2[i] = 12*i*i;\n d3ydx3[i] = 24*i;\n }\n\n auto csh = cardinal_septic_hermite(std::move(y), std::move(dydx), std::move(d2ydx2), std::move(d3ydx3), Real(0), Real(1));\n\n for (Real t = 1; t <= xmax; t += 0.0078125)\n {\n CHECK_ULP_CLOSE(t*t*t*t, csh(t), 117);\n CHECK_ULP_CLOSE(4*t*t*t, csh.prime(t), 117);\n CHECK_ULP_CLOSE(12*t*t, csh.double_prime(t), 117);\n }\n\n std::vector> data(10);\n for (size_t i = 0; i < data.size(); ++i)\n {\n data[i][0] = i*i*i*i;\n data[i][1] = 4*i*i*i;\n data[i][2] = 12*i*i;\n data[i][3] = 24*i;\n }\n\n auto csh_aos = cardinal_septic_hermite_aos(std::move(data), Real(0), Real(1));\n for (Real t = 1; t <= xmax; t += 0.0078125)\n {\n CHECK_ULP_CLOSE(t*t*t*t, csh_aos(t), 117);\n CHECK_ULP_CLOSE(4*t*t*t, csh_aos.prime(t), 117);\n CHECK_ULP_CLOSE(12*t*t, csh_aos.double_prime(t), 117);\n }\n}\n\n\ntemplate\nvoid test_interpolation_condition()\n{\n for (size_t n = 4; n < 50; ++n) {\n std::vector x(n);\n std::vector y(n);\n std::vector dydx(n);\n std::vector d2ydx2(n);\n std::vector d3ydx3(n);\n boost::random::mt19937 rd; \n boost::random::uniform_real_distribution dis(0,1);\n Real x0 = dis(rd);\n x[0] = x0;\n y[0] = dis(rd);\n for (size_t i = 1; i < n; ++i) {\n x[i] = x[i-1] + dis(rd);\n y[i] = dis(rd);\n dydx[i] = dis(rd);\n d2ydx2[i] = dis(rd);\n d3ydx3[i] = dis(rd);\n }\n\n auto x_copy = x;\n auto y_copy = y;\n auto dydx_copy = dydx;\n auto d2ydx2_copy = d2ydx2;\n auto 
d3ydx3_copy = d3ydx3;\n auto s = septic_hermite(std::move(x_copy), std::move(y_copy), std::move(dydx_copy), std::move(d2ydx2_copy), std::move(d3ydx3_copy));\n\n for (size_t i = 0; i < x.size(); ++i)\n {\n CHECK_ULP_CLOSE(y[i], s(x[i]), 2);\n CHECK_ULP_CLOSE(dydx[i], s.prime(x[i]), 2);\n }\n }\n}\n\n\nint main()\n{\n test_constant();\n test_linear();\n test_quadratic();\n test_cubic();\n test_quartic();\n test_interpolation_condition();\n\n test_constant();\n test_linear();\n test_quadratic();\n test_cubic();\n test_quartic();\n test_interpolation_condition();\n\n test_constant();\n test_linear();\n test_quadratic();\n test_cubic();\n test_quartic();\n test_interpolation_condition();\n\n#ifdef BOOST_HAS_FLOAT128\n test_constant();\n test_linear();\n test_quadratic();\n test_cubic();\n test_quartic();\n test_interpolation_condition();\n#endif\n\n return boost::math::test::report_errors();\n}\n", "meta": {"hexsha": "cd60acbe2c429bba37ad26db03fc2da298c49dba", "size": 15135, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "venv/boost_1_73_0/libs/math/test/septic_hermite_test.cpp", "max_stars_repo_name": "uosorio/heroku_face", "max_stars_repo_head_hexsha": "7d6465e71dba17a15d8edaef520adb2fcd09d91e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-07-12T13:52:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-12T13:52:18.000Z", "max_issues_repo_path": "3rdparty/boost_1_73_0/libs/math/test/septic_hermite_test.cpp", "max_issues_repo_name": "qingkouwei/mediaones", "max_issues_repo_head_hexsha": "cec475e1bfd5807b5351cc7e38d244ac5298ca16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2021-10-21T12:42:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-03T08:41:31.000Z", "max_forks_repo_path": "Libs/boost_1_76_0/libs/math/test/septic_hermite_test.cpp", "max_forks_repo_name": "Antd23rus/S2DE", "max_forks_repo_head_hexsha": 
"47cc7151c2934cd8f0399a9856c1e54894571553", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2015-11-03T14:12:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-22T19:20:54.000Z", "avg_line_length": 28.5028248588, "max_line_length": 140, "alphanum_fraction": 0.5503138421, "num_tokens": 5353, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.835483553488848, "lm_q2_score": 0.5964331462646254, "lm_q1q2_score": 0.49831008445970304}} {"text": "// Copyright (c) 2020-2021 Franz Alt\n// This code is licensed under MIT license (see LICENSE.txt for details).\n\n#include \n\n#include \n#include \n#include \n#include \n\n#include \n\nnamespace {\n\ntemplate\ncvpg::histogram calc_hog_cell(std::shared_ptr image, std::size_t from_x, std::size_t to_x, std::size_t from_y, std::size_t to_y, bool l1_normalilze = true);\n\ntemplate<>\ncvpg::histogram calc_hog_cell(std::shared_ptr image, std::size_t from_x, std::size_t to_x, std::size_t from_y, std::size_t to_y, bool l1_normalize)\n{\n cvpg::histogram histogram(9);\n\n const std::uint8_t * raw = image->data(0).get();\n const std::size_t image_width = image->width();\n\n std::int16_t gx = 0;\n std::int16_t gy = 0;\n double mag = 0.0;\n double angle = 0.0;\n\n // correct from/to-values to calculate the gradients up to the image borders\n if (from_x > 1)\n {\n --from_x;\n }\n else\n {\n from_x = 1;\n }\n\n if (to_x < (image->width() - 2))\n {\n ++to_x;\n }\n else\n {\n to_x = image->width() - 2;\n }\n\n if (from_y > 1)\n {\n --from_y;\n }\n else\n {\n from_y = 1;\n }\n\n if (to_y < (image->height() - 2))\n {\n ++to_y;\n }\n else\n {\n to_y = image->height() - 2;\n }\n\n for (std::size_t y = from_y; y <= to_y; ++y)\n {\n for (std::size_t x = from_x; x <= to_x; ++x)\n {\n // calculate the x/y gradients\n gx = static_cast(raw[y * image_width + (x + 1)]) - static_cast(raw[y * image_width + (x - 1)]);\n gy = static_cast(raw[(y + 1) * image_width + x]) - 
static_cast(raw[(y - 1) * image_width + x]);\n\n // calculate the gradient magnitude\n mag = std::sqrt(static_cast(gx * gx + gy * gy));\n\n // calculate the absolute gradient angle\n angle = std::fabs((gx == 0) ? 0.0 : atan(static_cast(gy) / static_cast(gx)) * 180.0 / M_PI);\n\n const std::size_t bin = static_cast(std::floor(angle / 20.0));\n\n histogram.at(bin) += static_cast(mag);\n }\n }\n\n cvpg::histogram normalized_histogram(9);\n\n if (l1_normalize)\n {\n // normalize histogram (L1 norm)\n double sum = 0;\n\n for (auto const & h : histogram)\n {\n sum += std::fabs(static_cast(h));\n }\n\n for (std::size_t i = 0; i < histogram.bins(); ++i)\n {\n normalized_histogram.at(i) = static_cast(histogram.at(i)) / sum;\n }\n }\n else\n {\n // normalize histogram (L2 norm)\n double sum = 0;\n\n for (auto const & h : histogram)\n {\n auto h_ = std::fabs(static_cast(h));\n sum += h_ * h_;\n }\n\n sum = std::sqrt(sum);\n\n for (std::size_t i = 0; i < histogram.bins(); ++i)\n {\n normalized_histogram.at(i) = std::min(1.0, static_cast(histogram.at(i)) / sum);\n }\n }\n\n return normalized_histogram;\n}\n\ntemplate<>\ncvpg::histogram calc_hog_cell(std::shared_ptr image, std::size_t from_x, std::size_t to_x, std::size_t from_y, std::size_t to_y, bool l1_normalize)\n{\n // TODO implement me!\n\n return cvpg::histogram(9);\n}\n\ntemplate\nstruct hog_col_task : public boost::asynchronous::continuation_task > >\n{\n hog_col_task(std::shared_ptr image, std::size_t from_x, std::size_t to_x, std::size_t from_y, std::size_t to_y, std::size_t cell_dimension, std::size_t sequential_cells_per_row)\n : boost::asynchronous::continuation_task > >(\"hog_col\")\n , m_image(image)\n , m_from_x(from_x)\n , m_to_x(to_x)\n , m_from_y(from_y)\n , m_to_y(to_y)\n , m_cell_dimension(cell_dimension)\n , m_sequential_cells_per_row(sequential_cells_per_row)\n {}\n\n void operator()()\n {\n auto task_res = this->this_task_result();\n\n const std::size_t cells = (m_to_x - m_from_x + 1) / 
m_cell_dimension;\n\n if (cells <= m_sequential_cells_per_row)\n {\n std::vector > hogs;\n hogs.reserve(cells);\n\n for (std::size_t i = 0; i < cells; ++i)\n {\n hogs.push_back(calc_hog_cell(m_image, m_from_x + i * m_cell_dimension, m_from_x + (i + 1) * m_cell_dimension - 1, m_from_y, m_to_y));\n }\n\n task_res.set_value(std::move(hogs));\n }\n else\n {\n // determine middle of x-range dependet on the cell dimension\n const std::size_t x_half = m_from_x + (cells / 2) * m_cell_dimension;\n\n boost::asynchronous::create_callback_continuation(\n [task_res = std::move(task_res)](auto cont_res) mutable\n {\n try\n {\n auto h = std::move(std::get<1>(cont_res).get());\n\n std::vector > hogs(std::move(std::get<0>(cont_res).get()));\n hogs.insert(hogs.end(), h.begin(), h.end());\n\n task_res.set_value(std::move(hogs));\n }\n catch (...)\n {\n task_res.set_exception(std::current_exception());\n }\n },\n hog_col_task(m_image, m_from_x, x_half - 1, m_from_y, m_to_y, m_cell_dimension, m_sequential_cells_per_row),\n hog_col_task(m_image, x_half, m_to_x, m_from_y, m_to_y, m_cell_dimension, m_sequential_cells_per_row)\n );\n }\n }\n\nprivate:\n std::shared_ptr m_image;\n\n std::size_t m_from_x;\n std::size_t m_to_x;\n std::size_t m_from_y;\n std::size_t m_to_y;\n\n std::size_t m_cell_dimension;\n\n std::size_t m_sequential_cells_per_row;\n};\n\ntemplate\nstruct hog_row_task : public boost::asynchronous::continuation_task > >\n{\n hog_row_task(std::shared_ptr image, std::size_t from_x, std::size_t to_x, std::size_t from_y, std::size_t to_y, std::size_t cell_dimension, std::size_t sequential_cells_per_row)\n : boost::asynchronous::continuation_task > >(\"hog_row\")\n , m_image(image)\n , m_from_x(from_x)\n , m_to_x(to_x)\n , m_from_y(from_y)\n , m_to_y(to_y)\n , m_cell_dimension(cell_dimension)\n , m_sequential_cells_per_row(sequential_cells_per_row)\n {}\n\n void operator()()\n {\n auto task_res = this->this_task_result();\n\n const std::size_t cells = (m_to_y - m_from_y + 1) / 
m_cell_dimension;\n\n if (cells <= 1)\n {\n boost::asynchronous::create_callback_continuation(\n [task_res = std::move(task_res)](auto cont_res) mutable\n {\n try\n {\n task_res.set_value(std::move(std::get<0>(cont_res).get()));\n }\n catch (...)\n {\n task_res.set_exception(std::current_exception());\n }\n },\n hog_col_task(m_image, m_from_x, m_to_x, m_from_y, m_to_y, m_cell_dimension, m_sequential_cells_per_row)\n );\n }\n else\n {\n // determine middle of y-range dependet on the cell dimension\n const std::size_t y_half = m_from_y + (cells / 2) * m_cell_dimension;\n\n boost::asynchronous::create_callback_continuation(\n [task_res = std::move(task_res)](auto cont_res) mutable\n {\n try\n {\n auto h = std::move(std::get<1>(cont_res).get());\n\n std::vector > hogs(std::move(std::get<0>(cont_res).get()));\n hogs.insert(hogs.end(), h.begin(), h.end());\n\n task_res.set_value(std::move(hogs));\n }\n catch (...)\n {\n task_res.set_exception(std::current_exception());\n }\n },\n hog_row_task(m_image, m_from_x, m_to_x, m_from_y, y_half - 1, m_cell_dimension, m_sequential_cells_per_row),\n hog_row_task(m_image, m_from_x, m_to_x, y_half, m_to_y, m_cell_dimension, m_sequential_cells_per_row)\n );\n }\n }\n\nprivate:\n std::shared_ptr m_image;\n\n std::size_t m_from_x;\n std::size_t m_to_x;\n std::size_t m_from_y;\n std::size_t m_to_y;\n\n std::size_t m_cell_dimension;\n\n std::size_t m_sequential_cells_per_row;\n};\n\ntemplate\nstruct hog_task : public boost::asynchronous::continuation_task > >\n{\n hog_task(image_type image, std::size_t cell_dimension, std::size_t sequential_cells_per_row = 4)\n : boost::asynchronous::continuation_task > >(\"hog\")\n , m_image(std::make_shared(std::forward(image)))\n , m_cell_dimension(cell_dimension)\n , m_sequential_cells_per_row(sequential_cells_per_row)\n {}\n\n void operator()()\n {\n auto task_res = this->this_task_result();\n\n // determine the amount of rows and columns dependent on the desired cell dimension\n const std::size_t 
cols = m_image->width() / m_cell_dimension;\n const std::size_t rows = m_image->height() / m_cell_dimension;\n\n boost::asynchronous::create_callback_continuation(\n [task_res = std::move(task_res)](auto cont_res) mutable\n {\n try\n {\n task_res.set_value(std::move(std::get<0>(cont_res).get()));\n }\n catch (...)\n {\n task_res.set_exception(std::current_exception());\n }\n },\n hog_row_task(m_image, 0, m_image->width() - 1, 0, m_image->height() - 1, m_cell_dimension, m_sequential_cells_per_row)\n );\n }\n\nprivate:\n std::shared_ptr m_image;\n\n std::size_t m_cell_dimension;\n\n std::size_t m_sequential_cells_per_row;\n};\n\nvoid paint_hog_cell(std::shared_ptr image, cvpg::histogram const & histogram, std::size_t from_x, std::size_t to_x, std::size_t from_y, std::size_t to_y)\n{\n auto raw = image->data(0).get();\n const std::size_t image_width = image->width();\n\n const std::size_t half_x = from_x + (to_x - from_x) / 2;\n const std::size_t half_y = from_y + (to_y - from_y) / 2;\n\n auto draw_line_from_center =\n [raw, image_width, h_x = half_x, h_y = half_y](std::int32_t dx, std::int32_t dy, double increment)\n {\n // calculate start point\n const std::int32_t x_s = h_x - abs(dx);\n const std::int32_t y_s = h_y - abs(dy);\n\n // calculate end point\n const std::int32_t x_e = h_x + abs(dx);\n const std::int32_t y_e = h_y + abs(dy);\n\n if (abs(dx) > abs(dy))\n {\n // calculate the slope of the line\n const std::int32_t s = dy / dx;\n\n std::size_t y = y_s;\n\n for (std::size_t x = x_s; x <= x_e; ++x, y += s)\n {\n std::uint8_t * r = raw + y * image_width + x;\n\n *r = std::min(\n static_cast(255),\n static_cast(*r + increment * 255.0)\n );\n }\n }\n else\n {\n // calculate the slope of the line\n const std::int32_t s = dx / dy;\n\n std::size_t x = x_s;\n\n for (std::size_t y = y_s; y <= y_e; ++y, x += s)\n {\n std::uint8_t * r = raw + y * image_width + x;\n\n *r = std::min(\n static_cast(255),\n static_cast(*r + increment * 255.0)\n );\n }\n }\n };\n\n for 
(std::size_t i = 0; i < histogram.bins(); ++i)\n {\n auto const & h = histogram.at(i);\n\n if (h != 0.0)\n {\n if (i == 0)\n {\n // case: angle is at range [0..20) degree\n draw_line_from_center(0, 2, h);\n }\n else if (i == 1)\n {\n // case: angle it at range [20..40) degree\n draw_line_from_center(1, 2, h);\n }\n else if (i == 2)\n {\n // case: angle it at range [40..60) degree\n draw_line_from_center(2, 2, h);\n }\n else if (i == 3)\n {\n // case: angle it at range [60..80) degree\n draw_line_from_center(2, 1, h);\n }\n else if (i == 4)\n {\n // case: angle it at range [80..100) degree\n draw_line_from_center(2, 0, h);\n }\n else if (i == 5)\n {\n // case: angle it at range [100..120) degree\n draw_line_from_center(2, -1, h);\n }\n else if (i == 6)\n {\n // case: angle it at range [120..140) degree\n draw_line_from_center(2, -2, h);\n }\n else if (i == 7)\n {\n // case: angle it at range [140..160) degree\n draw_line_from_center(1, -2, h);\n }\n else if (i == 8)\n {\n // case: angle it at range [160..180) degree\n draw_line_from_center(0, -2, h);\n }\n }\n }\n}\n\nstruct hog_image_col_task : public boost::asynchronous::continuation_task\n{\n hog_image_col_task(std::shared_ptr image, std::shared_ptr > > histograms, std::size_t from_cell_col, std::size_t to_cell_col, std::size_t from_cell_row, std::size_t to_cell_row, std::size_t cell_dimension)\n : boost::asynchronous::continuation_task(\"hog_image_col\")\n , m_image(std::move(image))\n , m_histograms(std::move(histograms))\n , m_from_cell_col(from_cell_col)\n , m_to_cell_col(to_cell_col)\n , m_from_cell_row(from_cell_row)\n , m_to_cell_row(to_cell_row)\n , m_cell_dimension(cell_dimension)\n {}\n\n void operator()()\n {\n auto task_res = this->this_task_result();\n\n const std::size_t cells = m_to_cell_col - m_from_cell_col + 1;\n\n if (cells <= 1)\n {\n const std::size_t cells_per_row = m_image->width() / m_cell_dimension;\n\n // determine histogram for current cell\n cvpg::histogram const & h = 
m_histograms->at(m_from_cell_row * cells_per_row + m_from_cell_col);\n\n const std::size_t from_x = m_from_cell_col * m_cell_dimension;\n const std::size_t to_x = from_x + m_cell_dimension - 1;\n const std::size_t from_y = m_from_cell_row * m_cell_dimension;\n const std::size_t to_y = from_y + m_cell_dimension - 1;\n\n paint_hog_cell(m_image, h, from_x, to_x, from_y, to_y);\n\n task_res.set_value();\n }\n else\n {\n // determine middle of col-range\n const std::size_t col_half = m_from_cell_col + (cells / 2);\n\n boost::asynchronous::create_callback_continuation(\n [task_res = std::move(task_res)](auto cont_res) mutable\n {\n try\n {\n std::get<0>(cont_res).get();\n std::get<1>(cont_res).get();\n\n task_res.set_value();\n }\n catch (...)\n {\n task_res.set_exception(std::current_exception());\n }\n },\n hog_image_col_task(m_image, m_histograms, m_from_cell_col, col_half - 1, m_from_cell_row, m_to_cell_row, m_cell_dimension),\n hog_image_col_task(m_image, m_histograms, col_half, m_to_cell_col, m_from_cell_row, m_to_cell_row, m_cell_dimension)\n );\n }\n }\n\nprivate:\n std::shared_ptr m_image;\n\n std::shared_ptr > > m_histograms;\n\n std::size_t m_from_cell_col;\n std::size_t m_to_cell_col;\n std::size_t m_from_cell_row;\n std::size_t m_to_cell_row;\n\n std::size_t m_cell_dimension;\n};\n\nstruct hog_image_row_task : public boost::asynchronous::continuation_task\n{\n hog_image_row_task(std::shared_ptr image, std::shared_ptr > > histograms, std::size_t from_cell_col, std::size_t to_cell_col, std::size_t from_cell_row, std::size_t to_cell_row, std::size_t cell_dimension)\n : boost::asynchronous::continuation_task(\"hog_image_row\")\n , m_image(std::move(image))\n , m_histograms(std::move(histograms))\n , m_from_cell_col(from_cell_col)\n , m_to_cell_col(to_cell_col)\n , m_from_cell_row(from_cell_row)\n , m_to_cell_row(to_cell_row)\n , m_cell_dimension(cell_dimension)\n {}\n\n void operator()()\n {\n auto task_res = this->this_task_result();\n\n const std::size_t cells 
= m_to_cell_row - m_from_cell_row + 1;\n\n if (cells <= 1)\n {\n boost::asynchronous::create_callback_continuation(\n [task_res = std::move(task_res)](auto cont_res) mutable\n {\n try\n {\n std::get<0>(cont_res).get();\n\n task_res.set_value();\n }\n catch (...)\n {\n task_res.set_exception(std::current_exception());\n }\n },\n hog_image_col_task(m_image, m_histograms, m_from_cell_col, m_to_cell_col, m_from_cell_row, m_to_cell_row, m_cell_dimension)\n );\n }\n else\n {\n // determine middle of row-range\n const std::size_t row_half = m_from_cell_row + (cells / 2);\n\n boost::asynchronous::create_callback_continuation(\n [task_res = std::move(task_res)](auto cont_res) mutable\n {\n try\n {\n std::get<0>(cont_res).get();\n std::get<1>(cont_res).get();\n\n task_res.set_value();\n }\n catch (...)\n {\n task_res.set_exception(std::current_exception());\n }\n },\n hog_image_row_task(m_image, m_histograms, m_from_cell_col, m_to_cell_col, m_from_cell_row, row_half - 1, m_cell_dimension),\n hog_image_row_task(m_image, m_histograms, m_from_cell_col, m_to_cell_col, row_half, m_to_cell_row, m_cell_dimension)\n );\n }\n }\n\nprivate:\n std::shared_ptr m_image;\n\n std::shared_ptr > > m_histograms;\n\n std::size_t m_from_cell_col;\n std::size_t m_to_cell_col;\n std::size_t m_from_cell_row;\n std::size_t m_to_cell_row;\n\n std::size_t m_cell_dimension;\n};\n\nstruct hog_image_task : public boost::asynchronous::continuation_task\n{\n hog_image_task(std::vector > histograms, std::size_t cells_per_row, std::size_t cell_dimension)\n : boost::asynchronous::continuation_task(\"hog_image\")\n , m_histograms(std::make_shared > >(std::move(histograms)))\n , m_cells_per_row(cells_per_row)\n , m_cell_dimension(cell_dimension)\n {}\n\n void operator()()\n {\n auto task_res = this->this_task_result();\n\n const std::size_t cell_cols = m_cells_per_row;\n const std::size_t cell_rows = m_histograms->size() / cell_cols;\n\n // create a result image ...\n auto image = std::make_shared(cell_cols * 
m_cell_dimension, cell_rows * m_cell_dimension);\n\n // ... and paint it black\n memset(image->data(0).get(), 0, image->width() * image->height());\n\n boost::asynchronous::create_callback_continuation(\n [task_res = std::move(task_res), image](auto cont_res) mutable\n {\n try\n {\n std::get<0>(cont_res).get();\n\n task_res.set_value(std::move(*image));\n }\n catch (...)\n {\n task_res.set_exception(std::current_exception());\n }\n },\n hog_image_row_task(image, m_histograms, 0, cell_cols - 1, 0, cell_rows - 1, m_cell_dimension)\n );\n }\n\nprivate:\n std::shared_ptr > > m_histograms;\n\n std::size_t m_cells_per_row;\n\n std::size_t m_cell_dimension;\n};\n\n}\n\nnamespace cvpg::imageproc::algorithms {\n\nboost::asynchronous::detail::callback_continuation > > hog(image_gray_8bit image, std::size_t cell_dimension)\n{\n return boost::asynchronous::top_level_callback_continuation > >(\n hog_task(std::move(image), cell_dimension)\n );\n}\n\nboost::asynchronous::detail::callback_continuation > > hog(image_rgb_8bit image, std::size_t cell_dimension)\n{\n return boost::asynchronous::top_level_callback_continuation > >(\n hog_task(std::move(image), cell_dimension)\n );\n}\n\nboost::asynchronous::detail::callback_continuation hog_image(std::vector > histograms, std::size_t cells_per_row, std::size_t cell_dimension)\n{\n return boost::asynchronous::top_level_callback_continuation(\n hog_image_task(std::move(histograms), cells_per_row, cell_dimension)\n );\n}\n\nboost::asynchronous::detail::callback_continuation hog_image(image_gray_8bit image)\n{\n const std::size_t cell_dimension = 8;\n\n auto cells_per_row = image.width() / cell_dimension;\n\n return boost::asynchronous::then(\n hog(std::move(image), cell_dimension),\n [cells_per_row, cell_dimension](auto cont_res)\n {\n return hog_image(std::move(cont_res.get()), cells_per_row, cell_dimension);\n }\n );\n}\n\nboost::asynchronous::detail::callback_continuation hog_image(image_rgb_8bit image)\n{\n const std::size_t 
cell_dimension = 8;\n\n auto cells_per_row = image.width() / cell_dimension;\n\n return boost::asynchronous::then(\n hog(std::move(image), cell_dimension),\n [cells_per_row, cell_dimension](auto cont_res)\n {\n return hog_image(std::move(cont_res.get()), cells_per_row, cell_dimension);\n }\n );\n}\n\n} // namespace cvpg::imageproc::algorithms\n", "meta": {"hexsha": "2cf4c3018e55b16e995e786def0c9ecbf4277536", "size": 23784, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/libcvpg/imageproc/algorithms/hog.cpp", "max_stars_repo_name": "franz-alt/cv-playground", "max_stars_repo_head_hexsha": "d6c3bbdb500bf121c28299d117e459730b2b912d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/libcvpg/imageproc/algorithms/hog.cpp", "max_issues_repo_name": "franz-alt/cv-playground", "max_issues_repo_head_hexsha": "d6c3bbdb500bf121c28299d117e459730b2b912d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/libcvpg/imageproc/algorithms/hog.cpp", "max_forks_repo_name": "franz-alt/cv-playground", "max_forks_repo_head_hexsha": "d6c3bbdb500bf121c28299d117e459730b2b912d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3203463203, "max_line_length": 268, "alphanum_fraction": 0.5598301379, "num_tokens": 5848, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8354835452961427, "lm_q2_score": 0.5964331462646254, "lm_q1q2_score": 0.49831007957330203}} {"text": "// Copyright Yamaha 2021\n// MIT License\n// https://github.com/yamaha-bps/cbr_control/blob/master/LICENSE\n\n#ifndef CBR_CONTROL__MPC__DLTV_OCP_HPP_\n#define CBR_CONTROL__MPC__DLTV_OCP_HPP_\n\n#include \n\n#include \n\n#include \n\n#include \"ocp_common.hpp\"\n\nnamespace cbr\n{\n\n/* ---------------------------------------------------------------------------------------------- */\n/* Continuous Time Varying Linear Optimal Control Problem Discretizer */\n/* ---------------------------------------------------------------------------------------------- */\n\ntemplate\nclass DltvOcp\n{\npublic:\n // Must be defined in ctlv_pb problem\n constexpr static std::size_t nx = cltv_pb_t::nx;\n constexpr static std::size_t nu = cltv_pb_t::nu;\n constexpr static std::size_t nPts = _nPts;\n\n using problem_t = cltv_pb_t;\n\n // Create some useful aliases\n using state_t = Eigen::Matrix;\n using input_t = Eigen::Matrix;\n using time_t = Eigen::Matrix;\n using A_t = Eigen::Matrix;\n using B_t = Eigen::Matrix;\n using Q_t = Eigen::Matrix;\n using R_t = Eigen::Matrix;\n\n // Get return type of problem functions\n using Ar_t = std::result_of_t;\n using Br_t = std::result_of_t;\n using Qr_t = std::result_of_t;\n using QTr_t = std::result_of_t;\n using Rr_t = std::result_of_t;\n\n /* -------------------------------------------------------------------------- */\n /* Optionals */\n /* -------------------------------------------------------------------------- */\n\n // Check existance of get_E function\n constexpr static bool has_E_approx = std::experimental::is_detected_v<\n ocp_detail::has_E_continuous, cltv_pb_t>;\n constexpr static bool has_E = std::experimental::is_detected_exact_v<\n state_t, ocp_detail::has_E_continuous, cltv_pb_t>||\n std::experimental::is_detected_exact_v<\n const state_t &, ocp_detail::has_E_continuous, cltv_pb_t>;\n using Er_t = 
std::experimental::detected_or_t<\n state_t, ocp_detail::has_E_continuous, cltv_pb_t>;\n static_assert(\n !(has_E_approx && !has_E),\n \"Detected get_E function doesn't have a correct return type. \"\n \"It must be an nx*1 Eigen::Matrix (or a const reference to one)\");\n\n // Check existance of get_q function\n constexpr static bool has_q_approx = std::experimental::is_detected_v<\n ocp_detail::has_q_continuous, cltv_pb_t>;\n constexpr static bool has_q = std::experimental::is_detected_exact_v<\n state_t, ocp_detail::has_q_continuous, cltv_pb_t>||\n std::experimental::is_detected_exact_v<\n const state_t &, ocp_detail::has_q_continuous, cltv_pb_t>;\n using qr_t = std::experimental::detected_or_t<\n state_t, ocp_detail::has_q_continuous, cltv_pb_t>;\n static_assert(\n !(has_q_approx && !has_q),\n \"Detected get_q function doesn't have a correct return type. \"\n \"It must be an nx*1 Eigen::Matrix (or a const reference to one)\");\n\n // Check existance of get_qT function\n constexpr static bool has_qT_approx = std::experimental::is_detected_v<\n ocp_detail::has_qT_continuous, cltv_pb_t>;\n constexpr static bool has_qT = std::experimental::is_detected_exact_v<\n state_t, ocp_detail::has_qT_continuous, cltv_pb_t>||\n std::experimental::is_detected_exact_v<\n const state_t &, ocp_detail::has_qT_continuous, cltv_pb_t>;\n\n using qTr_t = std::experimental::detected_or_t<\n state_t, ocp_detail::has_qT_continuous, cltv_pb_t>; \\\n static_assert(\n !(has_qT_approx && !has_qT),\n \"Detected get_qT function doesn't have a correct return type. 
\"\n \"It must be an nx*1 Eigen::Matrix (or a const reference to one)\");\n\n // Check existance of get_r function\n constexpr static bool has_r_approx = std::experimental::is_detected_v<\n ocp_detail::has_r_continuous, cltv_pb_t>;\n constexpr static bool has_r = std::experimental::is_detected_exact_v<\n input_t, ocp_detail::has_r_continuous, cltv_pb_t>||\n std::experimental::is_detected_exact_v<\n const input_t &, ocp_detail::has_r_continuous, cltv_pb_t>;\n using rr_t = std::experimental::detected_or_t<\n input_t, ocp_detail::has_r_continuous, cltv_pb_t>;\n static_assert(\n !(has_r_approx && !has_r),\n \"Detected get_r function doesn't have a correct return type. \"\n \"It must be an nu*1 Eigen::Matrix (or a const reference to one)\");\n\n // Check problem dimensions\n static_assert(nx > 0, \"Number of states must be > 0.\");\n static_assert(nu > 0, \"Number of inputs must be > 0.\");\n static_assert(nPts > 1, \"Number of trajectory points must be > 1.\");\n static_assert(exp_order < 20, \"Exponential order must be < 20.\");\n\n // Check return type of problem functions\n static_assert(\n std::is_same_v, A_t>,\n \"The get_A method of the problem must return an nx*nx Eigen::Matrix (or a reference to one).\");\n static_assert(\n std::is_same_v, B_t>,\n \"The get_B method of the problem must return an nx*nu Eigen::Matrix (or a reference to one).\");\n static_assert(\n std::is_same_v, Q_t>,\n \"The get_Q method of the problem must return an nx*nx Eigen::Matrix (or a reference to one).\");\n static_assert(\n std::is_same_v, state_t>,\n \"The get_q method of the problem must return an nx*1 Eigen::Matrix (or a reference to one).\");\n static_assert(\n std::is_same_v, Q_t>,\n \"The get_QT method of the problem must return an nx*nx Eigen::Matrix (or a reference to one).\");\n static_assert(\n std::is_same_v, state_t>,\n \"The get_qT method of the problem must return an nx*1 Eigen::Matrix (or a reference to one).\");\n static_assert(\n std::is_same_v, R_t>,\n \"The get_R 
method of the problem must return an nu*nu Eigen::Matrix (or a reference to one).\");\n static_assert(\n std::is_same_v, input_t>,\n \"The get_r method of the problem must return an nu*1 Eigen::Matrix (or a reference to one).\");\n\npublic:\n DltvOcp() = delete;\n DltvOcp(const DltvOcp &) = default;\n DltvOcp(DltvOcp &&) = default;\n DltvOcp & operator=(const DltvOcp &) = default;\n DltvOcp & operator=(DltvOcp &&) = default;\n\n explicit DltvOcp(const cltv_pb_t & pb)\n : cltv_pb_(pb),\n dt_{compute_dt()}\n {}\n\n explicit DltvOcp(cltv_pb_t && pb)\n : cltv_pb_(std::move(pb)),\n dt_{compute_dt()}\n {}\n\n template\n DltvOcp(T1 && pb)\n : cltv_pb_(std::forward(pb)),\n dt_{compute_dt()}\n {}\n\n void get_x0(Eigen::Ref x0) const\n {\n cltv_pb_.get_x0(x0);\n }\n\n void get_state_lb(std::size_t k, Eigen::Ref state_lb) const\n {\n cltv_pb_.get_state_lb(indexToTime(k), state_lb);\n }\n\n void get_state_ub(std::size_t k, Eigen::Ref state_ub) const\n {\n cltv_pb_.get_state_ub(indexToTime(k), state_ub);\n }\n\n void get_input_lb(std::size_t k, Eigen::Ref input_lb) const\n {\n cltv_pb_.get_input_lb(indexToTime(k), input_lb);\n }\n\n void get_input_ub(std::size_t k, Eigen::Ref input_ub) const\n {\n cltv_pb_.get_input_ub(indexToTime(k), input_ub);\n }\n\n A_t get_A(std::size_t k) const\n {\n // define identity for order = 0\n A_t expA = A_t::Identity();\n\n if constexpr (exp_order > 0) {\n const A_t Adt = dt_ * cltv_pb_.get_A(indexToTime(k));\n expA += Adt;\n if constexpr (exp_order > 1) {\n double c = 1.;\n A_t Adtp = Adt;\n for (std::size_t i = 2; i <= exp_order; i++) {\n Adtp *= Adt;\n c /= static_cast(i);\n expA += c * Adtp;\n }\n }\n }\n\n return expA;\n }\n\n B_t get_B(std::size_t k) const\n {\n // define identity for order = 0\n A_t expA = A_t::Identity();\n\n if constexpr (exp_order > 0) {\n const A_t Adt = dt_ * cltv_pb_.get_A(indexToTime(k));\n expA += Adt / 2.;\n if constexpr (exp_order > 1) {\n double c = 0.5;\n A_t Adtp = Adt / 2.;\n for (std::size_t i = 2; i <= 
exp_order; i++) {\n Adtp *= Adt;\n c /= static_cast(i + 1);\n expA += c * Adtp;\n }\n }\n }\n\n B_t expB = expA * cltv_pb_.get_B(indexToTime(k)) * dt_;\n\n return expB;\n }\n\n template\n state_t get_E(std::enable_if_t k)\n {\n // define identity for order = 0\n A_t expA = A_t::Identity();\n\n if constexpr (exp_order > 0) {\n const A_t Adt = dt_ * cltv_pb_.get_A(indexToTime(k));\n expA += Adt / 2.;\n if constexpr (exp_order > 1) {\n double c = 0.5;\n A_t Adtp = Adt / 2.;\n for (std::size_t i = 2; i <= exp_order; i++) {\n Adtp *= Adt;\n c /= static_cast(i + 1);\n expA += c * Adtp;\n }\n }\n }\n\n state_t expE = expA * cltv_pb_.get_E(indexToTime(k)) * dt_;\n return expE;\n }\n\n Q_t get_Q(std::size_t k) const\n {\n return dt_ * cltv_pb_.get_Q(indexToTime(k));\n }\n\n template\n state_t get_q(std::enable_if_t k) const\n {\n return dt_ * cltv_pb_.get_q(indexToTime(k));\n }\n\n R_t get_R(std::size_t k) const\n {\n return dt_ * cltv_pb_.get_R(indexToTime(k));\n }\n\n template\n input_t get_r(std::enable_if_t k) const\n {\n return dt_ * cltv_pb_.get_r(indexToTime(k));\n }\n\n Q_t get_QT() const\n {\n double T;\n cltv_pb_.get_T(T);\n return cltv_pb_.get_QT();\n }\n\n template\n state_t get_qT([[maybe_unused]] std::enable_if_t k = nullptr) const\n {\n double TT;\n cltv_pb_.get_T(TT);\n return cltv_pb_.get_qT();\n }\n\n\n cltv_pb_t & problem()\n {\n return cltv_pb_;\n }\n\n double indexToTime(std::size_t k) const\n {\n return static_cast(k) * dt_;\n }\n\nprotected:\n double compute_dt()\n {\n double T;\n cltv_pb_.get_T(T);\n return T / static_cast(nPts - 1);\n }\n\nprotected:\n cltv_pb_t cltv_pb_{};\n double dt_{};\n};\n\n// Class template argument deduction guides\ntemplate\nDltvOcp(T)->DltvOcp;\n\ntemplate\nDltvOcp(T1, T2)->DltvOcp;\n\n} // namespace cbr\n\n\n#endif // CBR_CONTROL__MPC__DLTV_OCP_HPP_\n", "meta": {"hexsha": "b36bf0b0302f8f862f062a9927cf467ce40f091f", "size": 10503, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/cbr_control/mpc/dltv_ocp.hpp", 
"max_stars_repo_name": "yamaha-bps/cbr_control", "max_stars_repo_head_hexsha": "c2faf79673d46c950dd7590f1072fc7decafad06", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/cbr_control/mpc/dltv_ocp.hpp", "max_issues_repo_name": "yamaha-bps/cbr_control", "max_issues_repo_head_hexsha": "c2faf79673d46c950dd7590f1072fc7decafad06", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/cbr_control/mpc/dltv_ocp.hpp", "max_forks_repo_name": "yamaha-bps/cbr_control", "max_forks_repo_head_hexsha": "c2faf79673d46c950dd7590f1072fc7decafad06", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5405405405, "max_line_length": 100, "alphanum_fraction": 0.6406740931, "num_tokens": 3113, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8354835411997897, "lm_q2_score": 0.5964331462646254, "lm_q1q2_score": 0.4983100771301014}} {"text": "\n// Copyright Gavin Band 2008 - 2012.\n// Distributed under the Boost Software License, Version 1.0.\n// (See accompanying file LICENSE_1_0.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt)\n\n#include \n#include \n#include \n#include \n#include \"config/config.hpp\"\n#if HAVE_CBLAS\n\t#include \"cblas.h\"\n#endif\n#include \"Eigen/Core\"\n#include \"Eigen/Eigenvalues\"\n#include \"genfile/VariantIdentifyingData.hpp\"\n#include \"genfile/VariantDataReader.hpp\"\n#include \"genfile/vcf/get_set_eigen.hpp\"\n#include \"appcontext/get_current_time_as_string.hpp\"\n#include \"components/RelatednessComponent/PCALoadingComputer.hpp\"\n#include \"components/RelatednessComponent/LapackEigenDecomposition.hpp\"\n#include \"components/RelatednessComponent/mean_centre_genotypes.hpp\"\n\n// #define DEBUG_PCA_LOADING_COMPUTER 1\n\nnamespace {\n\ttemplate< typename Vector1, typename Vector2, typename NonMissingVector >\n\tdouble compute_correlation( Vector1 const& v1, Vector2 const& v2, NonMissingVector const& non_missingness_indicator ) {\n\t\tassert( v1.size() == v2.size() ) ;\n\t\tdouble non_missingness = non_missingness_indicator.sum() ;\n\t\tdouble mean1 = 0.0 ;\n\t\tdouble mean2 = 0.0 ;\n\t\tfor( int i = 0; i < v1.size(); ++i ) {\n\t\t\tif( non_missingness_indicator( i )) {\n\t\t\t\tmean1 += v1(i) / non_missingness ;\n\t\t\t\tmean2 += v2(i) / non_missingness ;\n\t\t\t}\n\t\t}\n\n\t\tdouble covariance = 0.0 ;\n\t\tdouble variance1 = 0.0 ;\n\t\tdouble variance2 = 0.0 ;\n\t\tfor( int i = 0; i < v1.size(); ++i ) {\n\t\t\tif( non_missingness_indicator( i )) {\n\t\t\t\tcovariance += ( v1(i) - mean1 ) * ( v2(i) - mean2 ) ;\n\t\t\t\tvariance1 += ( v1(i) - mean1 ) * ( v1(i) - mean1 ) ;\n\t\t\t\tvariance2 += ( v2(i) - mean2 ) * ( v2(i) - mean2 ) ;\n\t\t\t}\n\t\t}\n\t\t\n\t\t// We should divide the covariance by N-1 and also\n\t\t// divide each variance by 
the same quantity.\n\t\t// But this washes out in the ratio.\n\t\t\n\t\treturn covariance / std::sqrt( variance1 * variance2 ) ;\n\t}\n\t\n\tstd::string eigenvector_column_names( std::size_t N, std::string const& string1, std::string const& string2, std::size_t i ) {\n\t\tif( i < N ) {\n\t\t\treturn string1 + genfile::string_utils::to_string( i+1 ) ;\n\t\t}\n\t\telse {\n\t\t\treturn string2 + genfile::string_utils::to_string( i+1 -N ) ;\n\t\t}\n\t}\n}\n\nPCALoadingComputer::PCALoadingComputer( int number_of_loadings ):\n\tm_number_of_loadings( number_of_loadings ),\n\tm_number_of_snps( 1 )\n{}\n\nvoid PCALoadingComputer::set_UDUT( std::size_t number_of_snps, Matrix const& udut ) {\n\tassert( udut.cols() == udut.rows() + 1 ) ;\n\tint n = std::min( int( m_number_of_loadings ), int( udut.rows() ) ) ;\n\tm_D = udut.block( 0, 0, n, 1 ) ;\n\tm_sqrt_D_inverse = 1 / m_D.array().sqrt() ;\n\tm_U = udut.block( 0, 1, udut.rows(), n ) ;\n\tm_number_of_snps = number_of_snps ;\n}\n\nvoid PCALoadingComputer::begin_processing_snps( std::size_t number_of_samples, genfile::SNPDataSource::Metadata const& ) {\n\tassert( number_of_samples = std::size_t( m_U.rows() )) ;\n\tm_genotype_calls.resize( number_of_samples ) ;\n\tm_non_missingness.resize( number_of_samples ) ;\n}\n\nvoid PCALoadingComputer::processed_snp( genfile::VariantIdentifyingData const& snp, genfile::VariantDataReader& data_reader ) {\n\tdata_reader.get(\n\t\t\":genotypes:\",\n\t\tgenfile::vcf::get_threshholded_calls( m_genotype_calls, m_non_missingness, 0.9, 0, 0, 1, 2 )\n\t) ;\n\tassert( m_genotype_calls.size() == m_U.rows() ) ;\n\tassert( m_non_missingness.size() == m_U.rows() ) ;\n\t// setup the storage\n\tm_loading_vectors.resize( 2 * m_D.rows() ) ;\n\tm_loading_vectors.setConstant( std::numeric_limits< double >::quiet_NaN() ) ;\n\tdouble const allele_frequency = m_genotype_calls.sum() / ( 2.0 * m_non_missingness.sum() ) ;\n\tif( m_non_missingness.sum() > 0 && allele_frequency > 0.001 ) {\n\t\t//std::cerr << 
\"pre-mean genotypes are: \" << m_genotype_calls.head( 20 ).transpose() << \"...\\n\" ;\n\t\tpca::mean_centre_genotypes( &m_genotype_calls, m_non_missingness, allele_frequency ) ;\n\t\tm_genotype_calls /= std::sqrt( 2.0 * allele_frequency * ( 1.0 - allele_frequency ) ) ;\n\n#if DEBUG_PCA_LOADING_COMPUTER\n\t\t//std::cerr << \" SNP: \" << snp << \", allele frequency = \" << allele_frequency << \".\\n\" ;\n\t\t//std::cerr << std::resetiosflags( std::ios::floatfield ) << std::setprecision( 5 ) ;\n\t\t//std::cerr << \"pre-scale genotypes are: \" << m_genotype_calls.head( 20 ).transpose() << \"...\\n\" ;\n\t\tstd::cerr << \" genotypes are: \" << m_genotype_calls.head( 20 ).transpose() << \"...\\n\" ;\n\t\tstd::cerr << \" non-missingness is: \" << m_non_missingness.head( 20 ).transpose() << \"...\\n\" ;\n\t\tstd::cerr << \" U is: \" << m_U.block(0,0,10,10) << \"...\\n\" ;\n\t\tstd::cerr << \" D is: \" << m_D << \"...\\n\" ;\n#endif // DEBUG_PCA_LOADING_COMPUTER\n\t\n\n\t\t//\n\t\t// Let X be the L\\times n matrix (L SNPs, n samples) of (mean-centred, scaled) genotypes. 
We want\n\t\t// to compute the row of the matrix S of unit eigenvectors of the variance-covariance matrix\n\t\t// (1/L) X X^t that corresponds to the current SNP.\n\t\t// The matrix S is given by\n\t\t// \n\t\t// S = (1/\u221aL) X U D^{-\u00bd}\n\t\t//\n\t\t// where\n\t\t// (1/L) X^t X = U D U^t\n\t\t// is the eigenvalue decomposition of (1/L) X^t X that we are passed in via set_UDUT (and L is the number of SNPs).\n\t\t//\n\t\t// This is true since then\n\t\t//\n\t\t// S^t S = D^{-\u00bd} U^t (1/L) X^t X U D^{-\u00bd} = id\n\t\t//\n\t\t// (so columns of S are orthogonal) while\n\t\t//\n\t\t// (1/L X X^t) S = (1/L\u221aL) X X^t X U D^{-\u00bd}\n\t\t// = (1/\u221aL) X U D U^t U D^{-\u00bd}\n\t\t// = (1/\u221aL) X U D^\u00bd\n\t\t// = SD\n\t\t//\n\t\t// (so columns of S are eigenvectors with eigenvalues given by D.)\n\t\t//\n#if 0\n\t\tm_loading_vectors.segment( 0, m_U.cols() ) =\n\t\t\t( m_genotype_calls.transpose() * m_U ) * m_D.array().sqrt().matrix().asDiagonal()\n\t\t\t/ ( ( m_D.transpose().array() * m_number_of_snps ).sqrt() ) ;\n#else\n\t\tm_loading_vectors.segment( 0, m_U.cols() ) =\n\t\t\t( m_genotype_calls.transpose() * m_U ) * m_sqrt_D_inverse.asDiagonal() ;\n\t\tm_loading_vectors /= std::sqrt( m_number_of_snps ) ;\n#endif\n\t\t// We also wish to compute the correlation between the SNP and the PCA component.\n\t\t// With S as above, the PCA components are the projections of columns of X onto columns of S.\n\t\t// If we want samples to correspond to columns, this is\n\t\t// S^t X \n\t\t// which can be re-written\n\t\t// sqrt(L) U D^{1/2}\n\t\t// i.e. 
we may as well compute the correlation with columns of U.\n\t\tif( m_non_missingness.sum() > 10 ) {\n\t\t\tfor( int i = 0; i < m_U.cols(); ++i ) {\n\t\t\t\tm_loading_vectors( m_U.cols() + i ) = compute_correlation( m_genotype_calls, m_U.col( i ), m_non_missingness ) ;\n\t\t\t}\n\t\t}\n\t}\n\tsend_results(\n\t\tsnp,\n\t\tm_non_missingness.sum(),\n\t\tallele_frequency,\n\t\tm_loading_vectors,\n\t\tboost::bind(\n\t\t\t&eigenvector_column_names,\n\t\t\tm_U.cols(),\n\t\t\t\"eigenvector_\",\n\t\t\t\"correlation_\",\n\t\t\t_1\n\t\t)\n\t) ;\n}\n\nvoid PCALoadingComputer::send_results_to( ResultCallback callback ) {\n\tm_result_signal.connect( callback ) ;\n}\n\nvoid PCALoadingComputer::send_results( genfile::VariantIdentifyingData const& snp, double const N, double const frequency, Eigen::VectorXd const& data, GetNames get_names ) {\n\tm_result_signal( snp, N, frequency, data, get_names ) ;\n}\n\nstd::string PCALoadingComputer::get_metadata() const {\n\tusing namespace genfile::string_utils ;\n\treturn \"Number of SNPs: \" + to_string( m_number_of_snps ) + \"\\n\"\n\t\t+ \"Number of samples: \" + to_string( m_U.rows() ) + \"\\n\"\n\t\t+ \"These loadings represent unit eigenvectors of the variance-covariance matrix\\n\"\n\t\t+ \" 1/L X X^t\\n\"\n\t\t+ \"where X is the LxN matrix of genotypes at L SNPs and N samples (normalised across rows.)\" ;\n}\n\nvoid PCALoadingComputer::end_processing_snps() {}\n\n\n\n", "meta": {"hexsha": "7e78325e751419e7810949cd485fea5e68ead6ba", "size": 7582, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "components/RelatednessComponent/src/PCALoadingComputer.cpp", "max_stars_repo_name": "CreRecombinase/qctool", "max_stars_repo_head_hexsha": "6dad3a15c461177bf6940ba7b991337402ca5c41", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2021-04-21T05:42:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T14:59:43.000Z", "max_issues_repo_path": 
"components/RelatednessComponent/src/PCALoadingComputer.cpp", "max_issues_repo_name": "CreRecombinase/qctool", "max_issues_repo_head_hexsha": "6dad3a15c461177bf6940ba7b991337402ca5c41", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2020-04-09T16:11:04.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-10T11:18:56.000Z", "max_forks_repo_path": "components/RelatednessComponent/src/PCALoadingComputer.cpp", "max_forks_repo_name": "gavinband/qctool", "max_forks_repo_head_hexsha": "8d8adb45151c91f953fe4a9af00498073b1132ba", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1005025126, "max_line_length": 174, "alphanum_fraction": 0.6582695859, "num_tokens": 2384, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8354835289107307, "lm_q2_score": 0.5964331462646255, "lm_q1q2_score": 0.49831006980049936}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"H5Cpp.h\"\n#include \"rapidjson/document.h\"\n#include \"rapidjson/writer.h\"\n#include \"rapidjson/stringbuffer.h\"\n \nusing std::cout;\nusing std::endl;\n\nusing namespace H5;\nusing namespace rapidjson;\nusing Eigen::tanh;\nusing Eigen::MatrixXd;\n\ninline double min(double a, double b) { return(((a)<(b))?(a):(b));}\ninline double max(double a, double b) { return(((a)>(b))?(a):(b));}\n\ndouble hard_sigmoid(double x){\n return(max(0.0, min(1.0, x*0.2+0.5)));\n}\n\ndouble sigmoid(double x){\n return 1.0 / (1.0 + exp(-x));\n}\n\ninline MatrixXd Mult(MatrixXd input1, MatrixXd input2){\n return(input1.array() * input2.array());\n}\n\nchar* read_model(const char *filename)\n{\n char *arch_json;\n \n H5File file(filename, H5F_ACC_RDWR);\n Attribute attr(file.openAttribute(\"model_config\"));\n DataType 
type(attr.getDataType());\n \n \n attr.read(type, &arch_json);\n return arch_json;\n}\n\n\ninline MatrixXd readCSV(const char* filename, int row_n, int col_n){\n \n std::ifstream file(filename);\n int col_flag = 0;\n\n MatrixXd m(row_n, col_n);\n \n\n if(row_n == 1){\n col_flag = 1;\n m.resize(col_n, row_n);\n }\n\n \n std::string line;\n \n int row = 0;\n int col = 0;\n \n \n if(file.is_open()){\n while(std::getline(file, line)){\n char *ptr = (char *)line.c_str();\n int len = line.length();\n \n col = 0;\n \n char *start = ptr;\n for(int i = 0; i < len; i++){\n if(ptr[i] == ','){\n m(row, col++) = atof(start);\n start = ptr + i + 1;\n }\n }\n m(row, col) = atof(start);\n \n row++;\n }\n file.close(); \n }\n \n if(col_flag)\n return(m.transpose());\n\n return(m);\n}\n\nclass Embedding{\n public:\n int row;\n int col;\n int out_size;\n MatrixXd W;\n\n Embedding(){\n\n }\n\n Embedding(const char* filename, int row, int col){\n this->row = row;\n this->col = col;\n this->out_size = col;\n\n W.resize(this->row, this->col);\n this->W = readCSV(filename, this->row, this->col);\n \n }\n\n MatrixXd operator()(MatrixXd input){\n MatrixXd Out(input.cols(), this->out_size);\n for(int t=0; t < Out.rows(); t++)\n Out.row(t) = W.row( input(0, t) );\n\n return(Out);\n }\n};\n\nclass LSTM{\n public:\n int inp_size;\n int out_size;\n MatrixXd kernel;\n MatrixXd recurrent_kernel;\n MatrixXd bias;\n\n LSTM(){\n\n }\n\n LSTM(const char* kernel_filename, const char* recurrent_kernel_filename, const char* bias_filename, int inp_size, int out_size){\n this->inp_size = inp_size;\n this->out_size = out_size;\n\n kernel.resize(this->inp_size, this->out_size * 4);\n kernel = readCSV(kernel_filename, this->inp_size, this->out_size * 4);\n\n recurrent_kernel.resize(this->out_size, this->out_size * 4);\n recurrent_kernel = readCSV(recurrent_kernel_filename, this->out_size, this->out_size * 4); \n \n bias.resize(1, this->out_size * 4);\n bias = readCSV(bias_filename, 1, this->out_size * 4); 
\n\n }\n\n MatrixXd operator()(MatrixXd input){\n int LSTM_OUT = this->out_size;\n int MAXLEN = input.rows();\n\n MatrixXd C_t = MatrixXd::Zero(1, this->out_size);\n MatrixXd h_t = MatrixXd::Zero(1, this->out_size);\n \n for(int t = 0; t < MAXLEN; t++){\n MatrixXd tmp_out(1, 4*LSTM_OUT);\n MatrixXd i_t(1, LSTM_OUT);\n MatrixXd f_t(1, LSTM_OUT);\n MatrixXd o_t(1, LSTM_OUT);\n MatrixXd g_t(1, LSTM_OUT);\n \n // IFCO\n tmp_out = input.row(t) * kernel + h_t * recurrent_kernel + bias;\n \n i_t = (tmp_out.block(0, 0*LSTM_OUT, 1, LSTM_OUT)).unaryExpr(&hard_sigmoid);\n f_t = (tmp_out.block(0, 1*LSTM_OUT, 1, LSTM_OUT)).unaryExpr(&hard_sigmoid);\n o_t = (tmp_out.block(0, 3*LSTM_OUT, 1, LSTM_OUT)).unaryExpr(&hard_sigmoid);\n g_t = tanh((tmp_out.block(0, 2*LSTM_OUT, 1, LSTM_OUT)).array());\n \n C_t = f_t.array() * C_t.array() + i_t.array() * g_t.array();\n h_t = o_t.array() * tanh(C_t.array());\n }\n return h_t; \n } \n};\n\nclass GRU{\n public:\n int inp_size;\n int out_size;\n MatrixXd kernel;\n MatrixXd recurrent_kernel;\n MatrixXd bias;\n\n GRU(){\n\n }\n\n GRU(const char* kernel_filename, const char* recurrent_kernel_filename, const char* bias_filename, int inp_size, int out_size){\n this->inp_size = inp_size;\n this->out_size = out_size;\n\n kernel.resize(this->inp_size, this->out_size * 4);\n kernel = readCSV(kernel_filename, this->inp_size, this->out_size * 4);\n\n recurrent_kernel.resize(this->out_size, this->out_size * 4);\n recurrent_kernel = readCSV(recurrent_kernel_filename, this->out_size, this->out_size * 4); \n \n bias.resize(1, this->out_size * 4);\n bias = readCSV(bias_filename, 1, this->out_size * 4); \n\n }\n\n MatrixXd operator()(MatrixXd input){\n int LSTM_OUT = this->out_size;\n int MAXLEN = input.rows();\n\n MatrixXd hh_t = MatrixXd::Zero(1,LSTM_OUT);\n MatrixXd h_t = MatrixXd::Zero(1,LSTM_OUT);\n MatrixXd one_arr = MatrixXd::Ones(1,LSTM_OUT);\n \n MatrixXd tmp_out(1, 3*LSTM_OUT);\n MatrixXd tmp_W(1, LSTM_OUT);\n MatrixXd tmp_U(LSTM_OUT, LSTM_OUT);\n 
MatrixXd z_t(1, LSTM_OUT);\n MatrixXd r_t(1, LSTM_OUT);\n \n tmp_U = recurrent_kernel.block(0, 2*LSTM_OUT, LSTM_OUT, LSTM_OUT);\n \n for(int t = 0; t < MAXLEN; t++){\n \n // IFCO\n \n tmp_W = input.row(t) * kernel + bias ;\n tmp_out = tmp_W + h_t * recurrent_kernel;\n \n z_t = (tmp_out.block(0, 0*LSTM_OUT, 1, LSTM_OUT)).unaryExpr(&hard_sigmoid);\n r_t = (tmp_out.block(0, 1*LSTM_OUT, 1, LSTM_OUT)).unaryExpr(&hard_sigmoid);\n r_t = r_t.array() * h_t.array();\n \n hh_t = tanh((r_t * tmp_U + tmp_W.block(0, 2*LSTM_OUT, 1, LSTM_OUT)).array() );\n h_t = (one_arr - z_t).array() * hh_t.array() + z_t.array() * h_t.array(); \n \n }\n return h_t;\n } \n};\n\nclass Dense{\n public:\n int inp_size;\n int out_size;\n string activation;\n MatrixXd kernel;\n MatrixXd bias;\n\n Dense(){\n\n }\n\n Dense(const char* kernel_filename, const char* bias_filename, int inp_size, int out_size, string activation=\"linear\"){\n this->inp_size = inp_size;\n this->out_size = out_size;\n this->activation = activation;\n\n kernel.resize(this->inp_size, this->out_size);\n kernel = readCSV(kernel_filename, this->inp_size, this->out_size);\n\n bias.resize(1, this->out_size);\n bias = readCSV(bias_filename, 1, this->out_size);\n }\n\n MatrixXd operator()(MatrixXd input){\n\n if(this->activation == \"linear\")\n return( input * kernel + bias );\n else if(this->activation == \"sigmoid\")\n return( (input * kernel + bias).unaryExpr(&sigmoid) );\n }\n};\n", "meta": {"hexsha": "c45dce25572baf1d23d0cf5968cf779a781f4b87", "size": 7965, "ext": "cc", "lang": "C++", "max_stars_repo_path": "cc/module.cc", "max_stars_repo_name": "VishnuDuttSharma/KeLiPTo", "max_stars_repo_head_hexsha": "c5730fcb25dec199ce41b12e08ee442a00aa6c43", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-09-12T21:59:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-04T09:26:22.000Z", "max_issues_repo_path": "cc/module.cc", "max_issues_repo_name": 
"VishnuDuttSharma/KeLiPTo", "max_issues_repo_head_hexsha": "c5730fcb25dec199ce41b12e08ee442a00aa6c43", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cc/module.cc", "max_forks_repo_name": "VishnuDuttSharma/KeLiPTo", "max_forks_repo_head_hexsha": "c5730fcb25dec199ce41b12e08ee442a00aa6c43", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5483870968, "max_line_length": 136, "alphanum_fraction": 0.521908349, "num_tokens": 1979, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8354835289107309, "lm_q2_score": 0.5964331462646254, "lm_q1q2_score": 0.4983100698004993}} {"text": "#include \n\nNTL_CLIENT\n\n\n#define TIME_IT(t, action) \\\ndo { \\\n double _t0, _t1; \\\n long _iter = 1; \\\n long _cnt = 0; \\\n do { \\\n _t0 = GetTime(); \\\n for (long _i = 0; _i < _iter; _i++) { action; _cnt++; } \\\n _t1 = GetTime(); \\\n } while ( _t1 - _t0 < 2 && (_iter *= 2)); \\\n t = (_t1 - _t0)/_iter; \\\n} while(0)\n\nvoid FillRandom(ZZX& f, long n, long k)\n{\n long sw = RandomBnd(2);\n f.SetLength(n);\n for (long i = 0; i < n; i++) {\n if (sw) {\n long kk = 1 + RandomBnd(k);\n RandomBits(f[i], kk);\n }\n else {\n long kk = RandomBnd(k);\n SetBit(f[i], kk);\n }\n if (RandomBnd(2)) NTL::negate(f[i], f[i]);\n }\n f.normalize();\n}\n\nint main()\n{\n\n for (long iter = 0; iter < 4000; iter++) {\n if (iter % 100 == 0) cerr << \".\";\n long na, nb, k;\n\n long sw = RandomBnd(3);\n\n if (sw == 0) {\n na = RandomBnd(20) + 1;\n nb = RandomBnd(20) + 1;\n k = RandomBnd(20) + 1;\n }\n else if (sw == 1) {\n na = RandomBnd(200) + 10;\n nb = RandomBnd(200) + 10;\n k = RandomBnd(200) + 10;\n }\n else {\n na = RandomBnd(3000) + 100;\n nb = RandomBnd(3000) + 100;\n k = RandomBnd(3000) + 100;\n }\n\n ZZX 
a, b, c, c1;\n FillRandom(a, na, k);\n FillRandom(b, nb, k);\n \n if (RandomBnd(2)) {\n SSMul(c, a, b);\n KarMul(c1, a, b);\n if (c != c1) Error(\"oops\");\n }\n else {\n SSSqr(c, a);\n KarSqr(c1, a);\n if (c != c1) Error(\"oops\");\n }\n }\n\n cerr << \"\\n\";\n}\n\n\n", "meta": {"hexsha": "9cd08b6e0a7f6a625e30cc77aa1e4b3cc73944b9", "size": 1571, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "homomorphic_evaluation/ntl-11.3.2/src/SSMulTest.cpp", "max_stars_repo_name": "dklee0501/PLDI_20_242_artifact_publication", "max_stars_repo_head_hexsha": "f2b73df9165c76e8b521d8ebd639d68321e3862b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 61.0, "max_stars_repo_stars_event_min_datetime": "2015-03-21T19:39:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T06:14:16.000Z", "max_issues_repo_path": "homomorphic_evaluation/ntl-11.3.2/src/SSMulTest.cpp", "max_issues_repo_name": "dklee0501/Lobster", "max_issues_repo_head_hexsha": "f2b73df9165c76e8b521d8ebd639d68321e3862b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15.0, "max_issues_repo_issues_event_min_datetime": "2021-12-24T22:53:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-25T10:03:13.000Z", "max_forks_repo_path": "LibSource/ExtendedNTL/tests/SSMulTest.cpp", "max_forks_repo_name": "ekzyis/CrypTool-2", "max_forks_repo_head_hexsha": "1af234b4f74486fbfeb3b3c49228cc36533a8c89", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 14.0, "max_forks_repo_forks_event_min_datetime": "2016-01-16T07:59:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-17T10:27:23.000Z", "avg_line_length": 19.1585365854, "max_line_length": 63, "alphanum_fraction": 0.4322087842, "num_tokens": 588, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8198933535169629, "lm_q2_score": 0.6076631698328916, "lm_q1q2_score": 0.4982189941230373}} {"text": "#ifndef STAN_MATH_PRIM_MAT_PROB_CATEGORICAL_LOGIT_LPMF_HPP\n#define STAN_MATH_PRIM_MAT_PROB_CATEGORICAL_LOGIT_LPMF_HPP\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace stan {\n namespace math {\n\n // CategoricalLog(n|theta) [0 < n <= N, theta unconstrained], no checking\n template \n typename boost::math::tools::promote_args::type\n categorical_logit_lpmf(int n,\n const Eigen::Matrix&\n beta) {\n static const char* function(\"categorical_logit_lpmf\");\n\n check_bounded(function, \"categorical outcome out of support\", n,\n 1, beta.size());\n check_finite(function, \"log odds parameter\", beta);\n\n if (!include_summand::value)\n return 0.0;\n\n // FIXME: wasteful vs. creating term (n-1) if not vectorized\n return beta(n - 1) - log_sum_exp(beta); // == log_softmax(beta)(n-1);\n }\n\n template \n inline\n typename boost::math::tools::promote_args::type\n categorical_logit_lpmf(int n,\n const Eigen::Matrix&\n beta) {\n return categorical_logit_lpmf(n, beta);\n }\n\n template \n typename boost::math::tools::promote_args::type\n categorical_logit_lpmf(const std::vector& ns,\n const Eigen::Matrix&\n beta) {\n static const char* function(\"categorical_logit_lpmf\");\n\n for (size_t k = 0; k < ns.size(); ++k)\n check_bounded(function, \"categorical outcome out of support\",\n ns[k], 1, beta.size());\n check_finite(function, \"log odds parameter\", beta);\n\n if (!include_summand::value)\n return 0.0;\n\n if (ns.size() == 0)\n return 0.0;\n\n Eigen::Matrix log_softmax_beta\n = log_softmax(beta);\n\n // FIXME: replace with more efficient sum()\n Eigen::Matrix::type,\n Eigen::Dynamic, 1> results(ns.size());\n for (size_t i = 0; i < ns.size(); ++i)\n results[i] = log_softmax_beta(ns[i] - 1);\n return sum(results);\n }\n\n template \n inline\n typename 
boost::math::tools::promote_args::type\n categorical_logit_lpmf(const std::vector& ns,\n const Eigen::Matrix&\n beta) {\n return categorical_logit_lpmf(ns, beta);\n }\n\n }\n}\n#endif\n", "meta": {"hexsha": "a26e5e9e1b8856012aacad2f0f13e4500a35aca0", "size": 3097, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "cmdstan/stan/lib/stan_math/stan/math/prim/mat/prob/categorical_logit_lpmf.hpp", "max_stars_repo_name": "yizhang-cae/torsten", "max_stars_repo_head_hexsha": "dc82080ca032325040844cbabe81c9a2b5e046f9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cmdstan/stan/lib/stan_math/stan/math/prim/mat/prob/categorical_logit_lpmf.hpp", "max_issues_repo_name": "yizhang-cae/torsten", "max_issues_repo_head_hexsha": "dc82080ca032325040844cbabe81c9a2b5e046f9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cmdstan/stan/lib/stan_math/stan/math/prim/mat/prob/categorical_logit_lpmf.hpp", "max_forks_repo_name": "yizhang-cae/torsten", "max_forks_repo_head_hexsha": "dc82080ca032325040844cbabe81c9a2b5e046f9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1931818182, "max_line_length": 78, "alphanum_fraction": 0.6193090087, "num_tokens": 796, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.8198933359135361, "lm_q2_score": 0.6076631698328917, "lm_q1q2_score": 0.49821898342608323}} {"text": "/**\n * @file lsh_test.cpp\n *\n * Unit tests for the 'LSHSearch' class.\n */\n#include \n#include \n#include \n#include \"old_boost_test_definitions.hpp\"\n\n#include \n\nusing namespace std;\nusing namespace mlpack;\nusing namespace mlpack::neighbor;\n\nBOOST_AUTO_TEST_SUITE(LSHTest);\n\nBOOST_AUTO_TEST_CASE(LSHSearchTest)\n{\n // Force to specific random seed for these results.\n math::RandomSeed(0);\n\n // Precomputed hash width value.\n const double hashWidth = 4.24777;\n\n arma::mat rdata(2, 10);\n rdata << 3 << 2 << 4 << 3 << 5 << 6 << 0 << 8 << 3 << 1 << arma::endr <<\n 0 << 3 << 4 << 7 << 8 << 4 << 1 << 0 << 4 << 3 << arma::endr;\n\n arma::mat qdata(2, 3);\n qdata << 3 << 2 << 0 << arma::endr << 5 << 3 << 4 << arma::endr;\n\n // INPUT TO LSH:\n // Number of points: 10\n // Number of dimensions: 2\n // Number of projections per table: 'numProj' = 3\n // Number of hash tables: 'numTables' = 2\n // hashWidth (computed): 'hashWidth' = 4.24777\n // Second hash size: 'secondHashSize' = 11\n // Size of the bucket: 'bucketSize' = 3\n\n // Things obtained by random sampling listed in the sequences\n // as they will be obtained in the 'LSHSearch::BuildHash()' private function\n // in 'LSHSearch' class.\n //\n // 1. The weights of the second hash obtained as:\n // secondHashWeights = arma::floor(arma::randu(3) * 11.0);\n // COR.SOL.: secondHashWeights = [9, 4, 8];\n //\n // 2. The offsets for all the 3 projections in each of the 2 tables:\n // offsets.randu(3, 2)\n // COR.SOL.: [0.7984 0.3352; 0.9116 0.7682; 0.1976 0.2778]\n // offsets *= hashWidth\n // COR.SOL.: [3.3916 1.4240; 3.8725 3.2633; 0.8392 1.1799]\n //\n // 3. The (2 x 3) projection matrices for the 2 tables:\n // projMat.randn(2, 3)\n // COR.SOL.: Proj. Mat 1: [2.7020 0.0187 0.4355; 1.3692 0.6933 0.0416]\n // COR.SOL.: Proj. 
Mat 2: [-0.3961 -0.2666 1.1001; 0.3895 -1.5118 -1.3964]\n LSHSearch<> lsh_test(rdata, 3, 2, hashWidth, 11, 3);\n// LSHSearch<> lsh_test(rdata, qdata, 3, 2, 0.0, 11, 3);\n\n // Given this, the 'LSHSearch::bucketRowInHashTable' should be:\n // COR.SOL.: [2 11 4 7 6 3 11 0 5 1 8]\n //\n // The 'LSHSearch::bucketContentSize' should be:\n // COR.SOL.: [2 0 1 1 3 1 0 3 3 3 1]\n //\n // The final hash table 'LSHSearch::secondHashTable' should be\n // of size (3 x 9) with the following content:\n // COR.SOL.:\n // [0 2 4; 1 7 8; 3 9 10; 5 10 10; 6 10 10; 0 5 6; 1 2 8; 3 10 10; 4 10 10]\n\n arma::Mat neighbors;\n arma::mat distances;\n\n lsh_test.Search(qdata, 2, neighbors, distances);\n\n // The private function 'LSHSearch::ReturnIndicesFromTable(0, refInds)'\n // should hash the query 0 into the following buckets:\n // COR.SOL.: Table 1 Bucket 7, Table 2 Bucket 0, refInds = [0 2 3 4 9]\n //\n // The private function 'LSHSearch::ReturnIndicesFromTable(1, refInds)'\n // should hash the query 1 into the following buckets:\n // COR.SOL.: Table 1 Bucket 9, Table 2 Bucket 4, refInds = [1 2 7 8]\n //\n // The private function 'LSHSearch::ReturnIndicesFromTable(2, refInds)'\n // should hash the query 2 into the following buckets:\n // COR.SOL.: Table 1 Bucket 0, Table 2 Bucket 7, refInds = [0 2 3 4 9]\n\n // After search\n // COR.SOL.: 'neighbors' = [2 1 9; 3 8 2]\n // COR.SOL.: 'distances' = [2 0 2; 4 2 16]\n\n arma::Mat true_neighbors(2, 3);\n true_neighbors << 2 << 1 << 9 << arma::endr << 3 << 8 << 2 << arma::endr;\n arma::mat true_distances(2, 3);\n true_distances << 2 << 0 << 2 << arma::endr << 4 << 2 << 16 << arma::endr;\n\n for (size_t i = 0; i < 3; i++)\n {\n for (size_t j = 0; j < 2; j++)\n {\n// BOOST_REQUIRE_EQUAL(neighbors(j, i), true_neighbors(j, i));\n// BOOST_REQUIRE_CLOSE(distances(j, i), true_distances(j, i), 1e-5);\n }\n }\n}\n\nBOOST_AUTO_TEST_CASE(LSHTrainTest)\n{\n // This is a not very good test that simply checks that the re-trained LSH\n // model operates 
on the correct dimensionality and returns the correct number\n // of results.\n arma::mat referenceData = arma::randu(3, 100);\n arma::mat newReferenceData = arma::randu(10, 400);\n arma::mat queryData = arma::randu(10, 200);\n\n LSHSearch<> lsh(referenceData, 3, 2, 2.0, 11, 3);\n\n lsh.Train(newReferenceData, 4, 3, 3.0, 12, 4);\n\n arma::Mat neighbors;\n arma::mat distances;\n\n lsh.Search(queryData, 3, neighbors, distances);\n\n BOOST_REQUIRE_EQUAL(neighbors.n_cols, 200);\n BOOST_REQUIRE_EQUAL(neighbors.n_rows, 3);\n BOOST_REQUIRE_EQUAL(distances.n_cols, 200);\n BOOST_REQUIRE_EQUAL(distances.n_rows, 3);\n}\n\nBOOST_AUTO_TEST_CASE(EmptyConstructorTest)\n{\n // If we create an empty LSH model and then call Search(), it should throw an\n // exception.\n LSHSearch<> lsh;\n\n arma::mat dataset = arma::randu(5, 50);\n arma::mat distances;\n arma::Mat neighbors;\n BOOST_REQUIRE_THROW(lsh.Search(dataset, 2, neighbors, distances),\n std::invalid_argument);\n\n // Now, train.\n lsh.Train(dataset, 4, 3, 3.0, 12, 4);\n\n lsh.Search(dataset, 3, neighbors, distances);\n\n BOOST_REQUIRE_EQUAL(neighbors.n_cols, 50);\n BOOST_REQUIRE_EQUAL(neighbors.n_rows, 3);\n BOOST_REQUIRE_EQUAL(distances.n_cols, 50);\n BOOST_REQUIRE_EQUAL(distances.n_rows, 3);\n}\n\nBOOST_AUTO_TEST_SUITE_END();\n", "meta": {"hexsha": "70c132d142a269bc0d4826fcf1478e07d806ed92", "size": 5293, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/mlpack/tests/lsh_test.cpp", "max_stars_repo_name": "decltypeme/mlpack", "max_stars_repo_head_hexsha": "e3b418918fffce382ce9d8ceee9d9349ca199611", "max_stars_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-08-17T11:59:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-17T11:59:28.000Z", "max_issues_repo_path": "src/mlpack/tests/lsh_test.cpp", "max_issues_repo_name": "decltypeme/mlpack", "max_issues_repo_head_hexsha": "e3b418918fffce382ce9d8ceee9d9349ca199611", 
"max_issues_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mlpack/tests/lsh_test.cpp", "max_forks_repo_name": "decltypeme/mlpack", "max_forks_repo_head_hexsha": "e3b418918fffce382ce9d8ceee9d9349ca199611", "max_forks_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2893081761, "max_line_length": 80, "alphanum_fraction": 0.6463253353, "num_tokens": 1893, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8198933359135361, "lm_q2_score": 0.6076631698328916, "lm_q1q2_score": 0.4982189834260831}} {"text": "#pragma once\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"spectral/hermiten.hpp\"\n#include \"spectral/mpfr/import_std_math.hpp\"\n\nnamespace boltzmann {\n\n// ----------------------------------------------------------------------\nnamespace detail {\ntemplate \nclass sentry\n{\n public:\n typedef NUMERIC_T numeric_t;\n\n private:\n typedef std::vector vec_t;\n\n public:\n sentry(int n) { this->init(n); }\n\n sentry() { /* empty */}\n\n void init(int n)\n {\n n_ = n;\n factors_.resize(6);\n std::for_each(factors_.begin(), factors_.end(), [&](vec_t& v) { v.reserve(n + 1); });\n }\n\n NUMERIC_T coeff(int i, int j, int t);\n NUMERIC_T operator()(int i, int j, numeric_t x);\n\n private:\n int n_;\n std::vector factors_;\n};\n\n// ----------------------------------------------------------------------\ntemplate \nNUMERIC_T\nsentry::coeff(int i, int j, int t)\n{\n const int maxij = std::max(i, j);\n const int minij = std::min(i, j);\n // (min(i,j) ... 
1)\n auto& v0 = factors_[0];\n v0.resize(minij);\n for (int k = 0; k < minij; ++k) {\n v0[k] = std::min(i, j) - k;\n }\n\n // sqrt(max(i,j) .. min(i,j)+1)\n auto& v1 = factors_[1];\n v1.resize(maxij - minij);\n for (int k = 0; k < maxij - minij; ++k) {\n v1[k] = ::math::sqrt(numeric_t(maxij - k));\n }\n\n // (i-t)!\n auto& v2 = factors_[2];\n v2.resize(i - t);\n for (int k = 0; k < i - t; ++k) {\n v2[k] = 1 / numeric_t(i - t - k);\n }\n\n // (j-t)!\n auto& v3 = factors_[3];\n v3.resize(j - t);\n for (int k = 0; k < j - t; ++k) {\n v3[k] = 1 / numeric_t(j - t - k);\n }\n\n // t!\n auto& v4 = factors_[4];\n v4.resize(t);\n for (int k = 0; k < t; ++k) {\n v4[k] = 1 / numeric_t(t - k);\n }\n\n // 2^(t-(i+j)/2)\n auto& v5 = factors_[5];\n int exp2 = t - (i + j) / 2;\n v5.resize(std::abs(exp2));\n if (exp2 > 0)\n for (unsigned int k = 0; k < v5.size(); ++k) {\n v5[k] = 2;\n }\n else\n for (unsigned int k = 0; k < v5.size(); ++k) {\n v5[k] = 1 / numeric_t(2);\n }\n // cout << \"v5:\\t\";\n // for_each(v5.begin(), v5.end(), [](numeric_t v) { cout << v << \"\\t\"; });\n // cout << endl;\n\n std::sort(factors_.begin(), factors_.end(), [](const vec_t& v1, const vec_t& v2) {\n return v1.size() < v2.size();\n });\n std::vector lengths;\n std::for_each(\n factors_.begin(), factors_.end(), [&](const vec_t& v) { lengths.push_back(v.size()); });\n\n numeric_t f = (std::abs(i - t) % 2) ? 
-1 : 1;\n // multiply\n for (int jp = 0; jp < lengths[0]; ++jp) {\n numeric_t loc = 1;\n for (unsigned int fi = 0; fi < factors_.size(); ++fi) {\n loc *= factors_[fi][jp];\n }\n f *= loc;\n }\n for (unsigned int l = 1; l < lengths.size(); ++l) {\n for (int jp = lengths[l - 1]; jp < lengths[l]; ++jp) { // loop over remaining positions\n numeric_t loc = 1;\n for (unsigned int fi = l; fi < factors_.size(); ++fi) {\n loc *= factors_[fi][jp];\n }\n f *= loc;\n }\n }\n\n // 2^(t-(i+j)/2) missing term\n if ((i + j) % 2 && exp2 <= 0)\n f /= ::math::sqrt(numeric_t(2));\n else if ((i + j) % 2 && exp2 > 0)\n f *= ::math::sqrt(numeric_t(2));\n\n return f;\n}\n\n// ----------------------------------------------------------------------\ntemplate \nNUMERIC_T\nsentry::operator()(int i, int j, numeric_t x)\n{\n numeric_t sum = 0;\n\n // TOOD implement Horner's scheme\n for (int l = std::abs(i - j); l <= i + j; l += 2) {\n numeric_t loc = this->coeff(i, j, (i + j - l) / 2);\n\n sum += loc * ::math::pow(x, l);\n }\n\n return sum * ::math::exp(numeric_t(-x * x / 4));\n ;\n}\n\n/**\n * @brief polyval (Horner scheme)\n *\n * @param coeffs\n * @param x\n * @param N length of coeffs\n */\ntemplate \ninline NUMERIC_T\npolyval(NUMERIC_T* coeffs, NUMERIC_T x, int N)\n{\n typedef NUMERIC_T numeric_t;\n numeric_t b = coeffs[N - 1];\n for (int i = 1; i < N - 1; ++i) {\n b = coeffs[N - 1 - i] + b * x;\n }\n return x * b + coeffs[0];\n}\n\n} // end namespace detail\n\n// ----------------------------------------------------------------------\n/**\n * @brief Assemble shift matrix \\f$ S^{\\bar{x}}\\f$.\n *\n * @tparam NUMERIC_T numeric type\n *\n */\ntemplate \nclass HShiftMatrix\n{\n private:\n typedef NUMERIC_T numeric_t;\n typedef std::vector vec_t;\n\n public:\n typedef Eigen::Matrix matrix_t;\n\n public:\n /**\n *\n * @param N max polynomial degree\n */\n HShiftMatrix(int N) { this->init(N); }\n\n HShiftMatrix() { /* empty */}\n\n /**\n * Compute polynomial coefficients of p(i,j;x)\n *\n */\n 
void init(int N);\n\n /**\n * Create linear Operator S^x by evaluating the polynomial\n *\n * @param x\n */\n void setx(numeric_t x);\n\n const matrix_t& get() const { return S_; }\n void dump(std::string fname) const;\n\n private:\n int size_;\n boost::multi_array coeffs_;\n /* compute coefficients of the S_-entry polynomial */\n detail::sentry G_;\n matrix_t S_;\n};\n\n// ----------------------------------------------------------------------\ntemplate \nvoid\nHShiftMatrix::init(int N)\n{\n size_ = N + 1;\n G_.init(N + 1);\n S_.resize(N + 1, N + 1);\n coeffs_.resize(boost::extents[size_][size_][2 * size_ - 1]);\n\n // std::fill(coeffs_.origin(), coeffs_.origin() + coeffs_.num_elements(), 0);\n for (int i = 0; i < size_; ++i) {\n for (int j = 0; j < size_; ++j) {\n for (int l = std::abs(i - j); l <= i + j; l += 2) {\n coeffs_[i][j][l] = G_.coeff(i, j, (i + j - l) / numeric_t(2));\n }\n }\n }\n}\n\n// ----------------------------------------------------------------------\ntemplate \nvoid\nHShiftMatrix::setx(numeric_t x)\n{\n // Hint: polyval can be further optimized by using knowledge\n // about which coefficients are zero.\n numeric_t f = ::math::exp(numeric_t(-x * x / 4));\n for (int i = 0; i < size_; ++i) {\n for (int j = 0; j < size_; ++j) {\n S_(i, j) = f * detail::polyval(coeffs_[i][j].origin(), x, coeffs_.shape()[2]);\n }\n }\n}\n\n// ----------------------------------------------------------------------\ntemplate \nvoid\nHShiftMatrix::dump(std::string fname) const\n{\n std::ofstream fout(fname);\n fout << std::setprecision(10);\n fout << std::scientific;\n fout << S_;\n fout.close();\n}\n\n} // end namespace boltzmann\n", "meta": {"hexsha": "0834262d0fd16b032f75b7a3bc6efd883292e3f3", "size": 6530, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "spectral/shift_hermite.hpp", "max_stars_repo_name": "simonpp/2dRidgeletBTE", "max_stars_repo_head_hexsha": "5d08cbb5c57fc276c7a528f128615d23c37ef6a0", "max_stars_repo_licenses": ["BSD-3-Clause"], 
"max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-11-08T03:15:56.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-08T03:15:56.000Z", "max_issues_repo_path": "spectral/shift_hermite.hpp", "max_issues_repo_name": "simonpp/2dRidgeletBTE", "max_issues_repo_head_hexsha": "5d08cbb5c57fc276c7a528f128615d23c37ef6a0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spectral/shift_hermite.hpp", "max_forks_repo_name": "simonpp/2dRidgeletBTE", "max_forks_repo_head_hexsha": "5d08cbb5c57fc276c7a528f128615d23c37ef6a0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-11-08T03:15:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-08T03:15:56.000Z", "avg_line_length": 23.4892086331, "max_line_length": 94, "alphanum_fraction": 0.5234303216, "num_tokens": 2077, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7690802476562641, "lm_q2_score": 0.6477982247516797, "lm_q1q2_score": 0.4982088191233101}} {"text": "//==================================================================================================\n/*!\n @file\n\n @copyright 2016 NumScale SAS\n @copyright 2016 J.T. 
Lapreste\n\n Distributed under the Boost Software License, Version 1.0.\n (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)\n*/\n//==================================================================================================\n#ifndef BOOST_SIMD_FUNCTION_DIVS_HPP_INCLUDED\n#define BOOST_SIMD_FUNCTION_DIVS_HPP_INCLUDED\n\n#if defined(DOXYGEN_ONLY)\nnamespace boost { namespace simd\n{\n\n /*!\n\n @ingroup group-arithmetic\n Function object implementing divs capabilities\n\n Computes the truncated saturated division of its parameters.\n\n @par semantic:\n For any given value @c x, @c y of type @c T:\n\n @code\n T r = divs(x, y);\n @endcode\n\n The code is similar to:\n\n @code\n T r = trunc(x/y);\n @endcode\n\n for integral types, if y is @ref Zero, it returns @ref Valmax (resp. @ref Valmin)\n if x is positive (resp. negative) and @ref Zero if x is @ref Zero.\n\n Saturated means that for signed integer types,\n @c divs(Valmin,-1) returns @ref Valmax.\n\n @par Alias\n\n @c rdivide\n\n @see divides, rec, divfloor, divceil, divround, divround2even, divfix\n\n **/\n const boost::dispatch::functor divs = {};\n} }\n#endif\n\n#include \n#include \n\n#endif\n", "meta": {"hexsha": "b96809f4511f2237bd289c6e046d0bc20ce8cc14", "size": 1461, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/simd/function/divs.hpp", "max_stars_repo_name": "yaeldarmon/boost.simd", "max_stars_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/simd/function/divs.hpp", "max_issues_repo_name": "yaeldarmon/boost.simd", "max_issues_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, 
"max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/simd/function/divs.hpp", "max_forks_repo_name": "yaeldarmon/boost.simd", "max_forks_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.35, "max_line_length": 100, "alphanum_fraction": 0.5954825462, "num_tokens": 356, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7690802476562641, "lm_q2_score": 0.647798211152541, "lm_q1q2_score": 0.49820880866448114}} {"text": "#include \n#include \n#include \n#include \n\n// This typedefs and methods will be in header,\n// that wraps around native SQL interface.\ntypedef boost::variant cell_t;\ntypedef std::vector db_row_t;\n\n// This is just an example, no actual work with database.\ndb_row_t get_row(const char* /*query*/) {\n // See recipe \"Type 'reference to string'\"\n // for a better type for 'query' parameter.\n db_row_t row;\n row.push_back(10);\n row.push_back(10.1f);\n row.push_back(\"hello again\");\n return row;\n}\n\n// This is how code required to sum values\n// We can provide no template parameter\n// to boost::static_visitor<> if our visitor returns nothing.\nstruct db_sum_visitor: public boost::static_visitor {\n double operator()(int value) const {\n return value;\n }\n double operator()(float value) const {\n return value;\n }\n double operator()(const std::string& /*value*/) const {\n return 0.0;\n }\n};\n\nint main() {\n db_row_t row = get_row(\"Query: Give me some row, please.\");\n double res = 0.0;\n for (db_row_t::const_iterator it = row.begin(), end = row.end(); it != end; ++it) {\n res += boost::apply_visitor(db_sum_visitor(), *it);\n }\n std::cout << \"Sum of arithmetic types in database row is: \" << res << std::endl;\n}\n", "meta": {"hexsha": "04c859b7553ba476320c4f874d3d90ab1f450dbb", 
"size": 1364, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Chapter01/04_B_variant_db_example/main.cpp", "max_stars_repo_name": "PacktPublishing/Boost-Cpp-Application-Development-Cookbook-Second-Edition", "max_stars_repo_head_hexsha": "ffea2895138d3af1f4e35d657a726f6bd55b9030", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33.0, "max_stars_repo_stars_event_min_datetime": "2017-10-29T23:05:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T09:36:42.000Z", "max_issues_repo_path": "Chapter01/04_B_variant_db_example/main.cpp", "max_issues_repo_name": "PacktPublishing/Boost-Cpp-Application-Development-Cookbook-Second-Edition", "max_issues_repo_head_hexsha": "ffea2895138d3af1f4e35d657a726f6bd55b9030", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter01/04_B_variant_db_example/main.cpp", "max_forks_repo_name": "PacktPublishing/Boost-Cpp-Application-Development-Cookbook-Second-Edition", "max_forks_repo_head_hexsha": "ffea2895138d3af1f4e35d657a726f6bd55b9030", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18.0, "max_forks_repo_forks_event_min_datetime": "2017-09-07T18:47:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T09:37:39.000Z", "avg_line_length": 30.3111111111, "max_line_length": 87, "alphanum_fraction": 0.6554252199, "num_tokens": 345, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7690802370707281, "lm_q2_score": 0.6477982043529715, "lm_q1q2_score": 0.49820879657777534}} {"text": "#include \n#include \n#include \n#include \n#include \n#include \n#include \n// Bring in some random number generation from Schweizer Messer.\n#include \n#include \n#include \n#include \n\n\nint main(int argc, char ** argv)\n{\n if(argc != 2)\n {\n std::cout << \"Usage: example K\\n\";\n std::cout << \"The argument K is the number of timesteps to include in the optimization\\n\";\n return 1;\n }\n\n const int K = atoi(argv[1]);\n\n try \n {\n // The true wall position\n const double true_w = -5.0;\n \n // The noise properties.\n const double sigma_n = 0.01;\n const double sigma_u = 0.1;\n const double sigma_x = 0.01;\n\n // Create random odometry\n std::vector true_u_k(K);\n BOOST_FOREACH(double & u, true_u_k)\n\t{\n\t u = sm::random::uniform();\n\t}\n \n // Create the noisy odometry\n std::vector u_k(K);\n for(int k = 0; k < K; ++k)\n\t{\n\t u_k[k] = true_u_k[k] + sigma_u * sm::random::normal();\n\t}\n\n // Create the states from noisy odometry.\n std::vector x_k(K);\n std::vector true_x_k(K);\n x_k[0] = 0.0;\n true_x_k[0] = 0.0;\n for(int k = 1; k < K; ++k)\n\t{\n\t true_x_k[k] = true_x_k[k-1] + true_u_k[k];\n\t x_k[k] = x_k[k-1] + u_k[k];\n\t}\n\n\n // Create the noisy measurments\n std::vector y_k(K);\n for(int k = 0; k < K; ++k)\n\t{\n\t y_k[k] = (1.0 / (true_w - true_x_k[k])) + sigma_n * sm::random::normal();\n\t}\n \n // Now we can build an optimization problem.\n boost::shared_ptr problem( new aslam::backend::OptimizationProblem);\n \n // First, create a design variable for the wall position.\n boost::shared_ptr dv_w(new aslam::backend::ScalarDesignVariable(true_w + sm::random::normal()));\n // Setting this active means we estimate it.\n dv_w->setActive(true);\n // Add it to the optimization problem.\n problem->addDesignVariable(dv_w);\n\n // Now we add the initial state.\n boost::shared_ptr dv_x_km1(new 
aslam::backend::ScalarDesignVariable(x_k[0]));\n // Setting this active means we estimate it.\n dv_x_km1->setActive(true);\n // Add it to the optimization problem.\n problem->addDesignVariable(dv_x_km1);\n\n // Now create a prior for this initial state.\n boost::shared_ptr prior(new aslam::backend::ErrorTermPrior(dv_x_km1.get(), true_x_k[0], sigma_x * sigma_x));\n // and add it to the problem.\n problem->addErrorTerm(prior);\n \n // Now march through the states creating design variables,\n // odometry error terms and measurement error terms.\n for(int k = 1; k < K; ++k)\n\t{\n\t boost::shared_ptr dv_x_k(new aslam::backend::ScalarDesignVariable(x_k[k]));\n\t dv_x_k->setActive(true);\n\t problem->addDesignVariable(dv_x_k);\n\n\t // Create odometry error\n\t boost::shared_ptr em(new aslam::backend::ErrorTermMotion(dv_x_km1.get(), dv_x_k.get(), u_k[k], sigma_u * sigma_u));\n\t problem->addErrorTerm(em);\n\t \n\t // Create observation error\n\t boost::shared_ptr eo(new aslam::backend::ErrorTermObservation(dv_x_k.get(), dv_w.get(), y_k[k], sigma_n * sigma_n));\n\t problem->addErrorTerm(eo);\n\t \n\t // Move this design variable to the x_{k-1} position for use in the next loop.\n\t dv_x_km1 = dv_x_k;\n\t}\n\n // Now we have a valid optimization problem full of design variables and error terms.\n // Create some optimization options.\n aslam::backend::OptimizerOptions options;\n options.verbose = true;\n options.linearSolver = \"cholmod\";\n options.levenbergMarquardtLambdaInit = 10;\n options.doSchurComplement = false;\n options.doLevenbergMarquardt = true;\n // Force it to over-optimize\n options.convergenceDeltaX = 1e-12;\n options.convergenceDeltaJ = 1e-12;\n // Then create the optimizer and go!\n aslam::backend::Optimizer optimizer(options);\n optimizer.setProblem( problem );\n optimizer.optimize();\n\n \n }\n catch(const std::exception & e)\n {\n std::cout << \"Exception during processing: \" << e.what();\n return 1;\n }\n\n std::cout << \"Processing completed 
successfully\\n\";\n return 0;\n}\n", "meta": {"hexsha": "f78270df66cdb455b9014a26ef48bf35ae42a703", "size": 4649, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "aslam_backend_tutorial/src/example.cpp", "max_stars_repo_name": "ethz-asl/aslam_optimizer", "max_stars_repo_head_hexsha": "8e9dd18f9f0d8af461e88e108a3beda2003daf11", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 33.0, "max_stars_repo_stars_event_min_datetime": "2017-04-26T13:30:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T01:52:22.000Z", "max_issues_repo_path": "aslam_backend_tutorial/src/example.cpp", "max_issues_repo_name": "ethz-asl/aslam_optimizer", "max_issues_repo_head_hexsha": "8e9dd18f9f0d8af461e88e108a3beda2003daf11", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 15.0, "max_issues_repo_issues_event_min_datetime": "2017-02-14T16:02:31.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-12T06:07:22.000Z", "max_forks_repo_path": "aslam_backend_tutorial/src/example.cpp", "max_forks_repo_name": "ethz-asl/aslam_optimizer", "max_forks_repo_head_hexsha": "8e9dd18f9f0d8af461e88e108a3beda2003daf11", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2017-06-28T04:17:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-10T04:58:36.000Z", "avg_line_length": 33.6884057971, "max_line_length": 157, "alphanum_fraction": 0.6536889654, "num_tokens": 1249, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7956581000631542, "lm_q2_score": 0.6261241772283034, "lm_q1q2_score": 0.4981807732570775}} {"text": "//==============================================================================\n// Copyright 2003 - 2011 LASMEA UMR 6602 CNRS/Univ. 
Clermont II\n// Copyright 2009 - 2011 LRI UMR 8623 CNRS/Univ Paris Sud XI\n//\n// Distributed under the Boost Software License, Version 1.0.\n// See accompanying file LICENSE.txt or copy at\n// http://www.boost.org/LICENSE_1_0.txt\n//==============================================================================\n#ifndef BOOST_SIMD_ARITHMETIC_FUNCTIONS_SCALAR_ISQRT_HPP_INCLUDED\n#define BOOST_SIMD_ARITHMETIC_FUNCTIONS_SCALAR_ISQRT_HPP_INCLUDED\n\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace boost { namespace simd { namespace ext\n{\n BOOST_SIMD_FUNCTOR_IMPLEMENTATION( boost::simd::tag::isqrt_, tag::cpu_\n , (A0)\n , (scalar_< arithmetic_ >)\n )\n {\n typedef typename dispatch::meta::as_integer::type result_type;\n BOOST_SIMD_FUNCTOR_CALL(1)\n {\n return itrunc(boost::simd::sqrt(a0));\n }\n };\n\n BOOST_SIMD_FUNCTOR_IMPLEMENTATION( boost::simd::tag::isqrt_, tag::cpu_\n , (A0)\n , (scalar_< uint_ >)\n )\n {\n typedef A0 result_type;\n BOOST_SIMD_FUNCTOR_CALL(1)\n {\n return static_cast(boost::simd::sqrt(result_type(a0)));\n }\n };\n\n BOOST_SIMD_FUNCTOR_IMPLEMENTATION( boost::simd::tag::isqrt_, tag::cpu_\n , (A0)\n , (scalar_< int_ >)\n )\n {\n typedef A0 result_type;\n BOOST_SIMD_FUNCTOR_CALL(1)\n {\n return (is_ltz(a0)) ? 
Zero() : static_cast(boost::simd::sqrt(result_type(a0)));\n }\n };\n} } }\n\n\n#endif\n", "meta": {"hexsha": "882c53652d62eb436ffc5e9a74801f9558230a88", "size": 2092, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "modules/boost/simd/arithmetic/include/boost/simd/arithmetic/functions/scalar/isqrt.hpp", "max_stars_repo_name": "pbrunet/nt2", "max_stars_repo_head_hexsha": "2aeca0f6a315725b335efd5d9dc95d72e10a7fb7", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modules/boost/simd/arithmetic/include/boost/simd/arithmetic/functions/scalar/isqrt.hpp", "max_issues_repo_name": "pbrunet/nt2", "max_issues_repo_head_hexsha": "2aeca0f6a315725b335efd5d9dc95d72e10a7fb7", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/boost/simd/arithmetic/include/boost/simd/arithmetic/functions/scalar/isqrt.hpp", "max_forks_repo_name": "pbrunet/nt2", "max_forks_repo_head_hexsha": "2aeca0f6a315725b335efd5d9dc95d72e10a7fb7", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8666666667, "max_line_length": 94, "alphanum_fraction": 0.5535372849, "num_tokens": 492, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7956581000631541, "lm_q2_score": 0.6261241632752915, "lm_q1q2_score": 0.49818076215525053}} {"text": "#include \"Common.h\"\n#include \"CPS3.h\"\n#include \"PropertiesHolder/PropertiesHolder.h\"\n#include \"Material.h\"\n#include \n\nvoid CPS3::SetIndices(const std::vector& indices)\n{\n\tassert(indices.size() == 3);\n\tm_nodes[0] = indices[0];\n\tm_nodes[1] = indices[1];\n\tm_nodes[2] = indices[2];\n}\n\nstd::vector CPS3::GetIndices() const\n{\n\tstd::vector indices(3);\n\tindices[0] = m_nodes[0];\n\tindices[1] = m_nodes[1];\n\tindices[2] = m_nodes[2];\n\treturn indices;\n}\n\nstd::vector CPS3::GetFunctionValuesAtNodes(const Eigen::VectorXf& deforms)const\n{\n\tEigen::Matrix uv;\n\tfor (int i = 0; i < 3; ++i)\n\t{\n\t\tuv[2 * i + 0] = deforms[2 * m_nodes[i] + 0];\n\t\tuv[2 * i + 1] = deforms[2 * m_nodes[i] + 1];\n\t}\n\n\tEigen::Vector3f strain = m_B * uv;\n\t\n\tstd::vector output;\n\tfor (int i = 0; i < 3; i++)\n\t{\n\t\toutput.push_back(strain);\n\t}\n\treturn output;\n}\n\nvoid CPS3::CalcK(const StrideDataArray& nodes, const tfem::MaterialPtr mat, std::vector >& tripletVector)\n{\n\tm_mat = mat;\n\tEigen::Vector3f x;\n\tEigen::Vector3f y;\n\tfor (int i = 0; i < 3; ++i)\n\t{\n\t\tx[i] = nodes(m_nodes[i], 0);\n\t\ty[i] = nodes(m_nodes[i], 1);\n\t}\n\tEigen::Matrix3f C;\n\tC << Eigen::Vector3f(1.0f, 1.0f, 1.0f), x, y;\n\n\tfloat area = C.determinant() / 2.0f;\n\n\tEigen::Matrix3f IC = C.inverse();\n\t\n\tfor (int i = 0; i < 3; i++)\n\t{\n\t\tm_B(0, 2 * i + 0) = IC(1, i);\n\t\tm_B(0, 2 * i + 1) = 0.0f;\n\t\tm_B(1, 2 * i + 0) = 0.0f;\n\t\tm_B(1, 2 * i + 1) = IC(2, i);\n\t\tm_B(2, 2 * i + 0) = IC(2, i);\n\t\tm_B(2, 2 * i + 1) = IC(1, i);\n\t}\n\tEigen::Matrix K = m_B.transpose() * mat->GetElasticityMatrix(fem::PT_FlatStress) * m_B * area;\n\tGrabTriplets(K, tripletVector);\n}\n\nvoid CPS3::GrabTriplets(const Eigen::Matrix& K, std::vector >& tripletVector) const\n{\n\tfor (int i = 0; i < 3; i++)\n\t{\n\t\tfor (int j = 0; j < 3; 
j++)\n\t\t{\n\t\t\tEigen::Triplet trplt11(2 * m_nodes[i] + 0, 2 * m_nodes[j] + 0, K(2 * i + 0, 2 * j + 0));\n\t\t\tEigen::Triplet trplt12(2 * m_nodes[i] + 0, 2 * m_nodes[j] + 1, K(2 * i + 0, 2 * j + 1));\n\t\t\tEigen::Triplet trplt21(2 * m_nodes[i] + 1, 2 * m_nodes[j] + 0, K(2 * i + 1, 2 * j + 0));\n\t\t\tEigen::Triplet trplt22(2 * m_nodes[i] + 1, 2 * m_nodes[j] + 1, K(2 * i + 1, 2 * j + 1));\n\n\t\t\ttripletVector.push_back(trplt11);\n\t\t\ttripletVector.push_back(trplt12);\n\t\t\ttripletVector.push_back(trplt21);\n\t\t\ttripletVector.push_back(trplt22);\n\t\t}\n\t}\n}\n\ntfem::Material* CPS3::GetMaterial()\n{\n\treturn m_mat.get();\n}\n\nIElement* CPS3::Create()\n{\n\treturn new CPS3;\n}\n\nCPS3::CPS3()\n{\n\n}", "meta": {"hexsha": "9eaa6598b8d194b51e60bfb8ddb86aedf0750184", "size": 2548, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "sources/Elements/CPS3.cpp", "max_stars_repo_name": "podgorskiy/TinyFEM", "max_stars_repo_head_hexsha": "c1a5fedf21e6306fc11fa19afdaf48dab1b6740f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2017-11-05T14:01:04.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-11T15:24:54.000Z", "max_issues_repo_path": "sources/Elements/CPS3.cpp", "max_issues_repo_name": "podgorskiy/TinyFEM", "max_issues_repo_head_hexsha": "c1a5fedf21e6306fc11fa19afdaf48dab1b6740f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sources/Elements/CPS3.cpp", "max_forks_repo_name": "podgorskiy/TinyFEM", "max_forks_repo_head_hexsha": "c1a5fedf21e6306fc11fa19afdaf48dab1b6740f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2666666667, "max_line_length": 127, "alphanum_fraction": 0.5973312402, "num_tokens": 1045, "lm_name": 
"Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8459424373085146, "lm_q2_score": 0.588889130767832, "lm_q1q2_score": 0.4981663065862324}} {"text": "#include \n#include \n#include \n#include \n\nusing namespace Utility;\nusing Utility::Constants::Pi;\n\nnamespace Engine\n{\nnamespace Solver_Kernels\n{\n\n void sib_transform(const vectorfield & spins, const vectorfield & force, vectorfield & out)\n {\n int n = spins.size();\n\n auto s = spins.data();\n auto f = force.data();\n auto o = out.data();\n\n Backend::par::apply( n, [s,f,o] SPIRIT_LAMBDA (int idx) {\n Vector3 e1, a2, A;\n scalar detAi;\n e1 = s[idx];\n A = 0.5 * f[idx];\n\n // 1/determinant(A)\n detAi = 1.0 / (1 + pow(A.norm(), 2.0));\n\n // calculate equation witho the predictor?\n a2 = e1 - e1.cross(A);\n\n o[idx][0] = (a2[0] * (A[0] * A[0] + 1 ) + a2[1] * (A[0] * A[1] - A[2]) + a2[2] * (A[0] * A[2] + A[1])) * detAi;\n o[idx][1] = (a2[0] * (A[1] * A[0] + A[2]) + a2[1] * (A[1] * A[1] + 1 ) + a2[2] * (A[1] * A[2] - A[0])) * detAi;\n o[idx][2] = (a2[0] * (A[2] * A[0] - A[1]) + a2[1] * (A[2] * A[1] + A[0]) + a2[2] * (A[2] * A[2] + 1 )) * detAi;\n } );\n }\n\n void oso_calc_gradients(vectorfield & grad, const vectorfield & spins, const vectorfield & forces)\n {\n const Matrix3 t = ( Matrix3() << 0,0,1,0,-1,0,1,0,0 ).finished();\n\n auto g=grad.data();\n auto s=spins.data();\n auto f=forces.data();\n\n Backend::par::apply( spins.size(), [g,s,f,t] SPIRIT_LAMBDA (int idx)\n {\n g[idx] = t * (-s[idx].cross(f[idx]));\n }\n );\n }\n\n void oso_rotate( std::vector> & configurations, std::vector & searchdir)\n {\n int noi = configurations.size();\n int nos = configurations[0]->size();\n for(int img=0; imgdata();\n auto sd = searchdir[img].data();\n \n Backend::par::apply( nos, [s, sd] SPIRIT_LAMBDA (int idx) \n {\n scalar theta = (sd[idx]).norm();\n scalar q = cos(theta), w = 1-q, \n x = -sd[idx][0]/theta, y = -sd[idx][1]/theta, z = -sd[idx][2]/theta,\n s1 = -y*z*w, s2 = x*z*w, s3 = -x*y*w,\n p1 = 
x*sin(theta), p2 = y*sin(theta), p3 = z*sin(theta);\n\n scalar t1, t2, t3;\n if(theta > 1.0e-20) // if theta is too small we do nothing\n {\n t1 = (q+z*z*w) * s[idx][0] + (s1+p1) * s[idx][1] + (s2+p2) * s[idx][2];\n t2 = (s1-p1) * s[idx][0] + (q+y*y*w) * s[idx][1] + (s3+p3) * s[idx][2];\n t3 = (s2-p2) * s[idx][0] + (s3-p3) * s[idx][1] + (q+x*x*w) * s[idx][2];\n s[idx][0] = t1;\n s[idx][1] = t2;\n s[idx][2] = t3;\n };\n }\n );\n }\n }\n\n scalar maximum_rotation(const vectorfield & searchdir, scalar maxmove)\n {\n int nos = searchdir.size();\n scalar theta_rms = 0;\n theta_rms = sqrt( Backend::par::reduce(searchdir, [] SPIRIT_LAMBDA (const Vector3 & v){ return v.squaredNorm(); }) / nos );\n scalar scaling = (theta_rms > maxmove) ? maxmove/theta_rms : 1.0;\n return scaling;\n }\n\n void atlas_rotate(std::vector> & configurations, const std::vector & a3_coords, const std::vector & searchdir)\n {\n int noi = configurations.size();\n int nos = configurations[0]->size();\n for(int img=0; imgdata();\n auto d = searchdir[img].data();\n auto a3 = a3_coords[img].data();\n Backend::par::apply(nos, [nos, spins, d, a3] SPIRIT_LAMBDA (int idx) {\n const scalar gamma = (1 + spins[idx][2] * a3[idx]);\n const scalar denom = (spins[idx].head<2>().squaredNorm())/gamma + 2 * d[idx].dot( spins[idx].head<2>() ) + gamma * d[idx].squaredNorm();\n spins[idx].head<2>() = 2*(spins[idx].head<2>() + d[idx]*gamma);\n spins[idx][2] = a3[idx] * (gamma - denom);\n spins[idx] *= 1/(gamma + denom);\n } );\n }\n }\n\n void atlas_calc_gradients(vector2field & residuals, const vectorfield & spins, const vectorfield & forces, const scalarfield & a3_coords)\n {\n auto s = spins.data();\n auto a3 = a3_coords.data();\n auto g = residuals.data();\n auto f = forces.data();\n\n Backend::par::apply(spins.size(), [s, a3, g, f] SPIRIT_LAMBDA (int idx) {\n\n scalar J00 = s[idx][1] * s[idx][1] + s[idx][2]*(s[idx][2] + a3[idx]);\n scalar J10 = -s[idx][0] * s[idx][1];\n scalar J01 = -s[idx][0] * s[idx][1];\n scalar 
J11 = s[idx][0] * s[idx][0] + s[idx][2]*(s[idx][2] + a3[idx]);\n scalar J02 = -s[idx][0] * (s[idx][2] + a3[idx]);\n scalar J12 = -s[idx][1] * (s[idx][2] + a3[idx]);\n\n g[idx][0] = -(J00 * f[idx][0] + J01 * f[idx][1] + J02 * f[idx][2]);\n g[idx][1] = -(J10 * f[idx][0] + J11 * f[idx][1] + J12 * f[idx][2]);\n });\n }\n\n bool ncg_atlas_check_coordinates(const std::vector> & spins, std::vector & a3_coords, scalar tol)\n {\n int noi = spins.size();\n int nos = (*spins[0]).size();\n\n // We use `int` instead of `bool`, because somehow cuda does not like pointers to bool\n // TODO: fix in future\n field result = field(1, int(false));\n\n for(int img=0; imgdata();\n auto a3 = a3_coords[img].data();\n int *res = &result[0];\n\n Backend::par::apply( nos, [s, a3, tol, res] SPIRIT_LAMBDA (int idx) {\n if (s[idx][2]*a3[idx] < tol && res[0] == int(false))\n res[0] = int(true);\n } );\n }\n\n return bool(result[0]);\n }\n\n void lbfgs_atlas_transform_direction(std::vector> & configurations, std::vector & a3_coords, std::vector> & atlas_updates, std::vector> & grad_updates, std::vector & searchdir, std::vector & grad_pr, scalarfield & rho)\n {\n int noi = configurations.size();\n int nos = configurations[0]->size();\n\n for(int n=0; n t1(n_mem), t2(n_mem);\n for(int n=0; n 0) ? 
1 : -1;\n factor = (1 - a3[idx] * s[idx][2]) / (1 + a3[idx] * s[idx][2]);\n sd[idx] *= factor;\n g_pr[idx] *= factor;\n\n for(int n=0; n\n#include \n\n#include \"LHLExtractor.h\"\n\nuint32_t LHLExtractor::getInputLength() const {\n return n_;\n}\n\nuint32_t LHLExtractor::getSeedLen() const {\n return n_;\n}\n\nuint32_t LHLExtractor::getOutputLen() const {\n return m_;\n}\n\ndouble LHLExtractor::getMinEntropy() const {\n return k_;\n}\n\ndouble LHLExtractor::getError() const {\n return error_;\n}\n\nBitstring LHLExtractor::extract(const WeakSource &input, const Bitstring &seed) const {\n assert(input.getMinEntropy() >= k_);\n assert(seed.size() == n_);\n NTL::GF2EPush push;\n NTL::GF2E::init(NTL::BuildSparseIrred_GF2X(n_));\n auto x = input.getData().asGF2E();\n auto y = seed.asGF2E();\n return Bitstring((Bitstring(x) + Bitstring(y)).substr(0, m_));\n}\n\nLHLExtractor::LHLExtractor(uint32_t n, uint32_t k, double eps): n_(n), k_(k), error_(eps) {\n m_ = ceil(k_ + n_ - 2 * log2(1. / error_));\n}\n", "meta": {"hexsha": "b4f75620807db7323d5892dc614910848de6c079", "size": 914, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "extractor/seeded/LHLExtractor.cpp", "max_stars_repo_name": "Skird/extractors", "max_stars_repo_head_hexsha": "3c55d2c8377f465a4960861b7f358b18214f5ac3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-06-11T17:19:48.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-11T17:19:48.000Z", "max_issues_repo_path": "extractor/seeded/LHLExtractor.cpp", "max_issues_repo_name": "Skird/extractors", "max_issues_repo_head_hexsha": "3c55d2c8377f465a4960861b7f358b18214f5ac3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "extractor/seeded/LHLExtractor.cpp", "max_forks_repo_name": "Skird/extractors", "max_forks_repo_head_hexsha": 
"3c55d2c8377f465a4960861b7f358b18214f5ac3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.4358974359, "max_line_length": 91, "alphanum_fraction": 0.6739606127, "num_tokens": 282, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.8459424295406088, "lm_q2_score": 0.588889130767832, "lm_q1q2_score": 0.4981663020117971}} {"text": "#include \"advent.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n\nauto day05(int argc, char** argv) -> int\n{\n std::ifstream infile(argv[1]); // NOLINT\n std::string str;\n\n using point = std::tuple;\n using line = std::pair;\n std::vector lines;\n\n while(std::getline(infile, str)) {\n std::vector tokens;\n util::tokenize(str, ' ', tokens);\n int x = 0;\n int y = 0;\n scn::scan(tokens[0], \"{},{}\", x, y);\n point p1{x, y};\n scn::scan(tokens[2], \"{},{}\", x, y);\n point p2{x, y};\n lines.emplace_back(p1, p2);\n }\n\n int64_t xmax = std::numeric_limits::min();\n int64_t ymax = xmax;\n\n for(auto [p1, p2] : lines) {\n auto [x1, y1] = p1;\n auto [x2, y2] = p2;\n xmax = std::max({xmax, x1, x2});\n ymax = std::max({ymax, y1, y2});\n }\n\n Eigen::Array map = decltype(map)::Zero(xmax + 1, ymax + 1);\n for (auto [p1, p2] : lines) {\n auto [x1, y1] = p1;\n auto [x2, y2] = p2;\n\n if (x1 == x2 || y1 == y2 || std::abs(x1-x2) == std::abs(y1-y2)) {\n auto dx = util::sgn(x2-x1);\n auto dy = util::sgn(y2-y1);\n for (auto x = x1, y = y1; (dx == 0 || x != x2) && (dy == 0 || y != y2); x += dx, y += dy) {\n map(x, y) += 1;\n }\n map(x2, y2) += 1;\n }\n }\n auto count = (map > 1).count();\n fmt::print(\"count: {}\\n\", count);\n \n return 0;\n}\n", "meta": {"hexsha": "8b87abbdb560f6313cf6762e36c2156db4b28d9c", "size": 1594, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "source/day05.cpp", "max_stars_repo_name": "foolnotion/aoc2021", 
"max_stars_repo_head_hexsha": "e2bbcd8cab2a1a7b9922694daff7d289a905c133", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/day05.cpp", "max_issues_repo_name": "foolnotion/aoc2021", "max_issues_repo_head_hexsha": "e2bbcd8cab2a1a7b9922694daff7d289a905c133", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/day05.cpp", "max_forks_repo_name": "foolnotion/aoc2021", "max_forks_repo_head_hexsha": "e2bbcd8cab2a1a7b9922694daff7d289a905c133", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-12-29T23:05:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T23:05:48.000Z", "avg_line_length": 26.5666666667, "max_line_length": 103, "alphanum_fraction": 0.4755332497, "num_tokens": 543, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7606506526772883, "lm_q2_score": 0.6548947357776795, "lm_q1q2_score": 0.4981461082042122}} {"text": "/**TODO: Add copyright*/\n\n#define BOOST_TEST_MODULE Preprocessing test suite \n#include \n#include \n\nusing namespace EvoNet;\nusing namespace std;\n\nBOOST_AUTO_TEST_SUITE(preprocessing)\n\nBOOST_AUTO_TEST_CASE(P_selectRandomElement)\n{\n\t// [TODO: make test; currently, combined with selectRandomNode1]\n}\n\nBOOST_AUTO_TEST_CASE(P_UnitScaleFunctor)\n{\n\tEigen::Tensor data(2, 2);\n\tdata.setValues({{ 0, 2 }, { 3, 4 }});\n\tUnitScaleFunctor unit_scale(data);\n\tBOOST_CHECK_CLOSE(unit_scale.getUnitScale(), 0.25, 1e-6);\n\n\tEigen::Tensor data_test = data.unaryExpr(UnitScaleFunctor(data));\n\t\n\tBOOST_CHECK_CLOSE(data_test(0, 0), 0.0, 1e-6);\n\tBOOST_CHECK_CLOSE(data_test(1, 1), 1.0, 1e-6);\n}\n\nBOOST_AUTO_TEST_CASE(P_LinearScaleFunctor)\n{\n\tEigen::Tensor data(2, 2);\n\tdata.setValues({ { 0, 2 }, { 4, 8 } });\n\n\tEigen::Tensor data_test = data.unaryExpr(LinearScaleFunctor(0, 8, -1, 1));\n\n\tBOOST_CHECK_CLOSE(data_test(0, 0), -1.0, 1e-6);\n\tBOOST_CHECK_CLOSE(data_test(0, 1), -0.5, 1e-6);\n\tBOOST_CHECK_CLOSE(data_test(1, 0), 0.0, 1e-6);\n\tBOOST_CHECK_CLOSE(data_test(1, 1), 1.0, 1e-6);\n}\n\nBOOST_AUTO_TEST_CASE(P_LinearScale)\n{\n Eigen::Tensor data(2, 2, 2);\n data.setValues({\n {{ 0, 2 }, { 4, 8 }},\n {{ 1, 1 }, { 3, 5 }}\n });\n\n // Test default initialization for the domain and setters\n LinearScale linearScale1(-1, 1);\n linearScale1.setDomain(0, 8);\n Eigen::Tensor data_test = linearScale1(data);\n\n BOOST_CHECK_CLOSE(data_test(0, 0, 0), -1.0, 1e-6);\n BOOST_CHECK_CLOSE(data_test(0, 0, 1), -0.5, 1e-6);\n BOOST_CHECK_CLOSE(data_test(0, 1, 0), 0.0, 1e-6);\n BOOST_CHECK_CLOSE(data_test(0, 1, 1), 1.0, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 0, 0), -0.75, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 0, 1), -0.75, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 1, 0), -0.25, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 1, 1), 0.25, 1e-6);\n\n // Test 
with manual domain and range initialization\n LinearScale linearScale(0, 8, -1, 1);\n data_test = linearScale(data);\n\n BOOST_CHECK_CLOSE(data_test(0, 0, 0), -1.0, 1e-6);\n BOOST_CHECK_CLOSE(data_test(0, 0, 1), -0.5, 1e-6);\n BOOST_CHECK_CLOSE(data_test(0, 1, 0), 0.0, 1e-6);\n BOOST_CHECK_CLOSE(data_test(0, 1, 1), 1.0, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 0, 0), -0.75, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 0, 1), -0.75, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 1, 0), -0.25, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 1, 1), 0.25, 1e-6);\n\n // Test with domain calculation and range initialization\n LinearScale linearScale2(data, -1, 1);\n data_test = linearScale2(data);\n\n BOOST_CHECK_CLOSE(data_test(0, 0, 0), -1.0, 1e-6);\n BOOST_CHECK_CLOSE(data_test(0, 0, 1), -0.5, 1e-6);\n BOOST_CHECK_CLOSE(data_test(0, 1, 0), 0.0, 1e-6);\n BOOST_CHECK_CLOSE(data_test(0, 1, 1), 1.0, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 0, 0), -0.75, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 0, 1), -0.75, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 1, 0), -0.25, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 1, 1), 0.25, 1e-6);\n}\n\nBOOST_AUTO_TEST_CASE(P_Standardize)\n{\n Eigen::Tensor data(2, 2, 2);\n data.setValues({\n {{ 0, 2 }, { 4, 8 }},\n {{ 1, 3 }, { 3, 5 }}\n });\n\n // Test default initialization with setters and getters\n Standardize standardize1;\n standardize1.setMeanAndVar(1, 2);\n BOOST_CHECK_CLOSE(standardize1.getMean(), 1, 1e-6);\n BOOST_CHECK_CLOSE(standardize1.getVar(), 2, 1e-6);\n standardize1.setMeanAndVar(data);\n BOOST_CHECK_CLOSE(standardize1.getMean(), 3.25, 1e-6);\n BOOST_CHECK_CLOSE(standardize1.getVar(), 6.21428585, 1e-6);\n\n // Test with data initialization and getters\n Standardize standardize(data);\n BOOST_CHECK_CLOSE(standardize.getMean(), 3.25, 1e-6);\n BOOST_CHECK_CLOSE(standardize.getVar(), 6.21428585, 1e-6);\n\n // Test operator\n Eigen::Tensor data_test = standardize(data);\n BOOST_CHECK_CLOSE(data_test(0, 0, 0), -1.30373025, 1e-6);\n 
BOOST_CHECK_CLOSE(data_test(0, 0, 1), -0.501434684, 1e-6);\n BOOST_CHECK_CLOSE(data_test(0, 1, 0), 0.300860822, 1e-6);\n BOOST_CHECK_CLOSE(data_test(0, 1, 1), 1.90545189, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 0, 0), -0.902582467, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 0, 1), -0.100286946, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 1, 0), -0.100286946, 1e-6);\n BOOST_CHECK_CLOSE(data_test(1, 1, 1), 0.702008605, 1e-6);\n}\n\nBOOST_AUTO_TEST_CASE(P_MakeShuffleMatrix)\n{\n const int shuffle_dim_size = 8;\n std::vector indices = { 0, 1, 2, 3, 4, 5, 6, 7 };\n\n // Test default initialization with setters and getters\n MakeShuffleMatrix shuffle1;\n shuffle1.setIndices(8);\n BOOST_CHECK(shuffle1.getIndices() != indices);\n for (int i = 0; i < shuffle_dim_size; ++i) {\n BOOST_CHECK_GE(shuffle1.getIndices().at(i), 0);\n BOOST_CHECK_LE(shuffle1.getIndices().at(i), 7);\n }\n shuffle1.setShuffleMatrix(true);\n //std::cout << \"Shuffle_matrix\\n\" << shuffle1.getShuffleMatrix() << std::endl;\n for (int i = 0; i < shuffle_dim_size; ++i) {\n Eigen::Tensor row_sum = shuffle1.getShuffleMatrix().chip(i, 0).sum();\n BOOST_CHECK_EQUAL(row_sum(0), 1);\n }\n\n // Test initialization with dim size\n MakeShuffleMatrix shuffle2(shuffle_dim_size, true);\n BOOST_CHECK(shuffle2.getIndices() != indices);\n for (int i = 0; i < shuffle_dim_size; ++i) {\n BOOST_CHECK_GE(shuffle2.getIndices().at(i), 0);\n BOOST_CHECK_LE(shuffle2.getIndices().at(i), 7);\n }\n\n // Test initialization with indices to use\n MakeShuffleMatrix shuffle3(indices, true);\n BOOST_CHECK(shuffle3.getIndices() == indices);\n //std::cout << \"Shuffle_matrix\\n\" << shuffle3.getShuffleMatrix() << std::endl;\n for (int i = 0; i < shuffle_dim_size; ++i) {\n BOOST_CHECK_EQUAL(shuffle3.getShuffleMatrix()(i, i), 1);\n Eigen::Tensor row_sum = shuffle3.getShuffleMatrix().chip(i, 0).sum();\n BOOST_CHECK_EQUAL(row_sum(0), 1);\n }\n\n // Test row/column shuffling on toy data\n Eigen::Tensor data(2, 3);\n data.setValues({ {1,2,3},{4,5,6} 
});\n MakeShuffleMatrix shuffle_col(std::vector({1,2,0}), true);\n Eigen::Tensor col_shuffle = data;\n shuffle_col(col_shuffle, true);\n BOOST_CHECK_EQUAL(col_shuffle(0, 0), 2);\n BOOST_CHECK_EQUAL(col_shuffle(0, 1), 3);\n BOOST_CHECK_EQUAL(col_shuffle(0, 2), 1);\n BOOST_CHECK_EQUAL(col_shuffle(1, 0), 5);\n BOOST_CHECK_EQUAL(col_shuffle(1, 1), 6);\n BOOST_CHECK_EQUAL(col_shuffle(1, 2), 4);\n MakeShuffleMatrix shuffle_row(std::vector({ 1,0 }), false);\n Eigen::Tensor row_shuffle = data;\n shuffle_row(row_shuffle, false);\n BOOST_CHECK_EQUAL(row_shuffle(0, 0), 4);\n BOOST_CHECK_EQUAL(row_shuffle(0, 1), 5);\n BOOST_CHECK_EQUAL(row_shuffle(0, 2), 6);\n BOOST_CHECK_EQUAL(row_shuffle(1, 0), 1);\n BOOST_CHECK_EQUAL(row_shuffle(1, 1), 2);\n BOOST_CHECK_EQUAL(row_shuffle(1, 2), 3);\n\n // Test row/column shuffling on toy data\n Eigen::Tensor data_db(2, 3);\n data_db.setValues({ {1,2,3},{4,5,6} });\n MakeShuffleMatrix shuffle_col_db(std::vector({ 1,2,0 }), true);\n Eigen::Tensor col_shuffle_db = data_db;\n shuffle_col_db(col_shuffle_db, true);\n BOOST_CHECK_EQUAL(col_shuffle_db(0, 0), 2);\n BOOST_CHECK_EQUAL(col_shuffle_db(0, 1), 3);\n BOOST_CHECK_EQUAL(col_shuffle_db(0, 2), 1);\n BOOST_CHECK_EQUAL(col_shuffle_db(1, 0), 5);\n BOOST_CHECK_EQUAL(col_shuffle_db(1, 1), 6);\n BOOST_CHECK_EQUAL(col_shuffle_db(1, 2), 4);\n MakeShuffleMatrix shuffle_row_db(std::vector({ 1,0 }), false);\n Eigen::Tensor row_shuffle_db = data_db;\n shuffle_row_db(row_shuffle_db, false);\n BOOST_CHECK_EQUAL(row_shuffle_db(0, 0), 4);\n BOOST_CHECK_EQUAL(row_shuffle_db(0, 1), 5);\n BOOST_CHECK_EQUAL(row_shuffle_db(0, 2), 6);\n BOOST_CHECK_EQUAL(row_shuffle_db(1, 0), 1);\n BOOST_CHECK_EQUAL(row_shuffle_db(1, 1), 2);\n BOOST_CHECK_EQUAL(row_shuffle_db(1, 2), 3);\n}\n\nBOOST_AUTO_TEST_CASE(P_LabelSmoother)\n{\n\tEigen::Tensor data(2);\n\tdata.setValues({ 0, 1 });\n\n\tEigen::Tensor data_test = data.unaryExpr(LabelSmoother(0.1, 0.2));\n\n\tBOOST_CHECK_CLOSE(data_test(0), 0.1, 
1e-4);\n\tBOOST_CHECK_CLOSE(data_test(1), 0.8, 1e-4);\n}\n\nBOOST_AUTO_TEST_CASE(P_OneHotEncoder)\n{\n\t// TODO\n}\n\nBOOST_AUTO_TEST_CASE(SFcheckNan)\n{\n\tEigen::Tensor values(2);\n\tvalues.setConstant(5.0f);\n\tEigen::Tensor test(2);\n\n\t// control\n test = values.unaryExpr([](float c) { return checkNan(c); });\n\tBOOST_CHECK_CLOSE(test(0), 5.0, 1e-3);\n\tBOOST_CHECK_CLOSE(test(1), 5.0, 1e-3);\n\n\t// test\n\tvalues(0) = NAN; //NaN\n\tvalues(1) = INFINITY; //infinity\n test = values.unaryExpr([](float c) { return checkNan(c); });\n\tBOOST_CHECK_CLOSE(test(0), NAN, 1e-3);\n\tBOOST_CHECK_CLOSE(test(1), INFINITY, 1e-3);\n}\n\nBOOST_AUTO_TEST_CASE(SFsubstituteNanInf)\n{\n\tEigen::Tensor values(3);\n\tvalues.setConstant(5.0f);\n\tEigen::Tensor test(3);\n\n\t// control\n test = values.unaryExpr([](float c) { return substituteNanInf(c); });\n\tBOOST_CHECK_CLOSE(test(0), 5.0, 1e-3);\n\tBOOST_CHECK_CLOSE(test(1), 5.0, 1e-3);\n\n\t// test\n\tvalues(0) = NAN; //NaN\n\tvalues(1) = INFINITY; //infinity\n\tvalues(2) = -INFINITY; //infinity\n test = values.unaryExpr([](float c) { return substituteNanInf(c); });\n\tBOOST_CHECK_CLOSE(test(0), 0.0, 1e-3);\n\tBOOST_CHECK_CLOSE(test(1), 1e9, 1e-3);\n\tBOOST_CHECK_CLOSE(test(2), -1e9, 1e-3);\n}\n\nBOOST_AUTO_TEST_CASE(SFClipOp)\n{\n\tEigen::Tensor net_input(3);\n\tnet_input.setValues({ 0.0f, 1.0f, 0.5f });\n\n\t// test input\n\tEigen::Tensor result = net_input.unaryExpr(ClipOp(0.1f, 0.0f, 1.0f));\n\tBOOST_CHECK_CLOSE(result(0), 0.1, 1e-3);\n\tBOOST_CHECK_CLOSE(result(1), 0.9, 1e-3);\n\tBOOST_CHECK_CLOSE(result(2), 0.5, 1e-3);\n}\n\nBOOST_AUTO_TEST_CASE(SFRandBinaryOp)\n{\n\tEigen::Tensor net_input(2);\n\tnet_input.setValues({ 2.0f, 2.0f });\n\tEigen::Tensor result;\n\n\t// test input\n\tresult = net_input.unaryExpr(RandBinaryOp(0.0f));\n\tBOOST_CHECK_CLOSE(result(0), 2.0, 1e-3);\n\tBOOST_CHECK_CLOSE(result(1), 2.0, 1e-3);\n\tresult = net_input.unaryExpr(RandBinaryOp(1.0f));\n\tBOOST_CHECK_CLOSE(result(0), 0.0, 
1e-3);\n\tBOOST_CHECK_CLOSE(result(1), 0.0, 1e-3);\n}\n\nBOOST_AUTO_TEST_CASE(assertClose)\n{\n\tBOOST_CHECK(!assert_close(1.1, 1.2, 1e-4, 1e-4));\n\tBOOST_CHECK(assert_close(1.1, 1.2, 1, 1));\n}\n\nBOOST_AUTO_TEST_CASE(P_GaussianMixture)\n{\n\t// TODO\n}\n\nBOOST_AUTO_TEST_CASE(P_SwissRoll)\n{\n\t// TODO\n}\n\nBOOST_AUTO_TEST_CASE(P_GumbelSampler)\n{\n\tEigen::Tensor gumbel_samples = GumbelSampler(2, 3);\n\tBOOST_CHECK_LE(gumbel_samples(0, 0), 10);\n\tBOOST_CHECK_GE(gumbel_samples(0, 0), -10);\n\tBOOST_CHECK_LE(gumbel_samples(1, 2), 10);\n\tBOOST_CHECK_GE(gumbel_samples(1, 2), -10);\n\tstd::cout << gumbel_samples << std::endl;\n}\n\nBOOST_AUTO_TEST_CASE(P_GaussianSampler)\n{\n\tEigen::Tensor gaussian_samples = GaussianSampler(2, 3);\n\tBOOST_CHECK_LE(gaussian_samples(0, 0), 2);\n\tBOOST_CHECK_GE(gaussian_samples(0, 0), -2);\n\tBOOST_CHECK_LE(gaussian_samples(1, 2), 2);\n\tBOOST_CHECK_GE(gaussian_samples(1, 2), -2);\n\tstd::cout << gaussian_samples << std::endl;\n}\n\nBOOST_AUTO_TEST_SUITE_END()", "meta": {"hexsha": "c91039be7df71ef51bfcfbe0011838632fff0ad3", "size": 11138, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/tests/class_tests/evonet/source/Preprocessing_test.cpp", "max_stars_repo_name": "dmccloskey/smartPeak_cpp", "max_stars_repo_head_hexsha": "47a19a804b65daef712418b4e278704b340d20b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tests/class_tests/evonet/source/Preprocessing_test.cpp", "max_issues_repo_name": "dmccloskey/smartPeak_cpp", "max_issues_repo_head_hexsha": "47a19a804b65daef712418b4e278704b340d20b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7.0, "max_issues_repo_issues_event_min_datetime": "2018-01-11T20:39:09.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-11T21:02:31.000Z", "max_forks_repo_path": "src/tests/class_tests/evonet/source/Preprocessing_test.cpp", 
"max_forks_repo_name": "dmccloskey/smartPeak_cpp", "max_forks_repo_head_hexsha": "47a19a804b65daef712418b4e278704b340d20b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9573170732, "max_line_length": 92, "alphanum_fraction": 0.694828515, "num_tokens": 4011, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7606506526772884, "lm_q2_score": 0.6548947290421275, "lm_q1q2_score": 0.49814610308081025}} {"text": "/* Boost test/mul.cpp\n * test multiplication, division, square and square root on some intervals\n *\n * Copyright 2002-2003 Guillaume Melquiond\n *\n * Distributed under the Boost Software License, Version 1.0.\n * (See accompanying file LICENSE_1_0.txt or\n * copy at http://www.boost.org/LICENSE_1_0.txt)\n */\n\n#include \n#include \n#include \"bugs.hpp\"\n\ntypedef boost::numeric::interval I;\n\nstatic double min BOOST_PREVENT_MACRO_SUBSTITUTION (double a, double b, double c, double d) {\n return (std::min)((std::min)(a, b), (std::min)(c, d));\n}\n\nstatic double max BOOST_PREVENT_MACRO_SUBSTITUTION (double a, double b, double c, double d) {\n return (std::max)((std::max)(a, b), (std::max)(c, d));\n}\n\nstatic bool test_mul(double al, double au, double bl, double bu) {\n I a(al, au), b(bl, bu);\n I c = a * b;\n return c.lower() == (min)(al*bl, al*bu, au*bl, au*bu)\n && c.upper() == (max)(al*bl, al*bu, au*bl, au*bu);\n}\n\nstatic bool test_mul1(double ac, double bl, double bu) {\n I a(ac), b(bl, bu);\n I c = ac * b;\n I d = b * ac;\n I e = a * b;\n return equal(c, d) && equal(d, e);\n}\n\nstatic bool test_div(double al, double au, double bl, double bu) {\n I a(al, au), b(bl, bu);\n I c = a / b;\n return c.lower() == (min)(al/bl, al/bu, au/bl, au/bu)\n && c.upper() == (max)(al/bl, al/bu, au/bl, au/bu);\n}\n\nstatic bool test_div1(double al, double au, double bc) {\n I a(al, au), b(bc);\n I c = a / 
bc;\n I d = a / b;\n return equal(c, d);\n}\n\nstatic bool test_div2(double ac, double bl, double bu) {\n I a(ac), b(bl, bu);\n I c = ac / b;\n I d = a / b;\n return equal(c, d);\n}\n\nstatic bool test_square(double al, double au) {\n I a(al, au);\n I b = square(a);\n I c = a * a;\n return b.upper() == c.upper() &&\n (b.lower() == c.lower() || (c.lower() <= 0 && b.lower() == 0));\n}\n\nstatic bool test_sqrt(double al, double au) {\n I a(al, au);\n I b = square(sqrt(a));\n return subset(abs(a), b);\n}\n\nint test_main(int, char*[]) {\n BOOST_CHECK(test_mul(2, 3, 5, 7));\n BOOST_CHECK(test_mul(2, 3, -5, 7));\n BOOST_CHECK(test_mul(2, 3, -7, -5));\n BOOST_CHECK(test_mul(-2, 3, 5, 7));\n BOOST_CHECK(test_mul(-2, 3, -5, 7));\n BOOST_CHECK(test_mul(-2, 3, -7, -5));\n BOOST_CHECK(test_mul(-3, -2, 5, 7));\n BOOST_CHECK(test_mul(-3, -2, -5, 7));\n BOOST_CHECK(test_mul(-3, -2, -7, -5));\n\n BOOST_CHECK(test_mul1(3, 5, 7));\n BOOST_CHECK(test_mul1(3, -5, 7));\n BOOST_CHECK(test_mul1(3, -7, -5));\n BOOST_CHECK(test_mul1(-3, 5, 7));\n BOOST_CHECK(test_mul1(-3, -5, 7));\n BOOST_CHECK(test_mul1(-3, -7, -5));\n\n BOOST_CHECK(test_div(30, 42, 2, 3));\n BOOST_CHECK(test_div(30, 42, -3, -2));\n BOOST_CHECK(test_div(-30, 42, 2, 3));\n BOOST_CHECK(test_div(-30, 42, -3, -2));\n BOOST_CHECK(test_div(-42, -30, 2, 3));\n BOOST_CHECK(test_div(-42, -30, -3, -2));\n\n BOOST_CHECK(test_div1(30, 42, 3));\n BOOST_CHECK(test_div1(30, 42, -3));\n BOOST_CHECK(test_div1(-30, 42, 3));\n BOOST_CHECK(test_div1(-30, 42, -3));\n BOOST_CHECK(test_div1(-42, -30, 3));\n BOOST_CHECK(test_div1(-42, -30, -3));\n\n BOOST_CHECK(test_div2(30, 2, 3));\n BOOST_CHECK(test_div2(30, -3, -2));\n BOOST_CHECK(test_div2(-30, 2, 3));\n BOOST_CHECK(test_div2(-30, -3, -2));\n\n BOOST_CHECK(test_square(2, 3));\n BOOST_CHECK(test_square(-2, 3));\n BOOST_CHECK(test_square(-3, 2));\n\n BOOST_CHECK(test_sqrt(2, 3));\n BOOST_CHECK(test_sqrt(5, 7));\n BOOST_CHECK(test_sqrt(-1, 2));\n\n# ifdef __BORLANDC__\n 
::detail::ignore_warnings();\n# endif\n return 0;\n}\n", "meta": {"hexsha": "118acf325ab06da76a93f5768b35cd0be60b9fc8", "size": 3471, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "boost/libs/numeric/interval/test/mul.cpp", "max_stars_repo_name": "randolphwong/mcsema", "max_stars_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12278.0, "max_stars_repo_stars_event_min_datetime": "2015-01-29T17:11:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:12:00.000Z", "max_issues_repo_path": "boost/libs/numeric/interval/test/mul.cpp", "max_issues_repo_name": "randolphwong/mcsema", "max_issues_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9469.0, "max_issues_repo_issues_event_min_datetime": "2015-01-30T05:33:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:17:21.000Z", "max_forks_repo_path": "boost/libs/numeric/interval/test/mul.cpp", "max_forks_repo_name": "randolphwong/mcsema", "max_forks_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1343.0, "max_forks_repo_forks_event_min_datetime": "2017-12-08T19:47:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T11:31:36.000Z", "avg_line_length": 27.768, "max_line_length": 93, "alphanum_fraction": 0.6145203111, "num_tokens": 1241, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7606506635289836, "lm_q2_score": 0.6548947155710233, "lm_q1q2_score": 0.49814609994072384}} {"text": "#pragma once\n\n#include \"craam/RMDP.hpp\"\n\n#include \n#include \n\nnamespace craam{namespace algorithms{\n\nusing namespace std;\nusing namespace Eigen;\n\n/// Internal helper functions\nnamespace internal{\n\n /// Helper function to deal with variable indexing\n template\n inline Transition mean_transition_state(const SType& state, long index, const pair>& policies){\n return state.mean_transition(policies.first[index], policies.second[index]);\n }\n\n /// Helper function to deal with variable indexing\n template\n inline Transition mean_transition_state(const SType& state, long index, const indvec& policy){\n return state.mean_transition(policy[index]);\n }\n\n /// Helper function to deal with variable indexing\n template\n inline prec_t mean_reward_state(const SType& state, long index, const pair>& policies){\n return state.mean_reward(policies.first[index], policies.second[index]);\n }\n\n /// Helper function to deal with variable indexing\n template\n inline prec_t mean_reward_state(const SType& state, long index, const indvec& policy){\n return state.mean_reward(policy[index]);\n }\n}\n\n/**\nConstructs the transition (or its transpose) matrix for the policy.\n\n\\tparam SType Type of the state in the MDP (regular vs robust)\n\\tparam Policy Type of the policy. Either a single policy for\n the standard MDP evaluation, or a pair of a deterministic \n policy and a randomized policy of the nature\n\\param rmdp Regular or robust MDP\n\\param policies The policy (indvec) or the pair of the policy and the policy\n of nature (pair >). The nature is typically \n a randomized policy\n\\param transpose (optional, false) Whether to return the transpose of the transition matrix. 
\n This is useful for computing occupancy frequencies\n*/\ntemplate \ninline MatrixXd transition_mat(const GRMDP& rmdp, const Policies& policies, bool transpose = false) {\n const size_t n = rmdp.state_count();\n MatrixXd result = MatrixXd::Zero(n,n);\n\n const auto& states = rmdp.get_states();\n #pragma omp parallel for\n for(size_t s = 0; s < n; s++){\n const Transition&& t = internal::mean_transition_state(states[s], s, policies);\n\n const auto& indexes = t.get_indices();\n const auto& probabilities = t.get_probabilities();\n\n if(!transpose){\n for(size_t j=0; j < t.size(); j++)\n result(s,indexes[j]) = probabilities[j];\n }else{\n for(size_t j=0; j < t.size(); j++)\n result(indexes[j],s) = probabilities[j];\n }\n }\n return result;\n}\n\n/**\nConstructs the rewards vector for each state for the RMDP.\n\n\\tparam Policy Type of the policy. Either a single policy for\n the standard MDP evaluation, or a pair of a deterministic \n policy and a randomized policy of the nature\n\\param rmdp Regular or robust MDP\n\\param policies The policy (indvec) or the pair of the policy and the policy\n of nature (pair >). The nature is typically \n a randomized policy\n */\ntemplate\ninline numvec rewards_vec(const GRMDP& rmdp, const Policy& policies){\n \n const auto n = rmdp.state_count();\n numvec rewards(n);\n\n #pragma omp parallel for\n for(size_t s=0; s < n; s++){\n const SType& state = rmdp[s];\n if(state.is_terminal())\n rewards[s] = 0;\n else\n rewards[s] = internal::mean_reward_state(state, s, policies);\n }\n return rewards;\n}\n\n/**\nComputes occupancy frequencies using matrix representation of transition\nprobabilities. This method may not scale well\n\n\n\\tparam SType Type of the state in the MDP (regular vs robust)\n\\tparam Policy Type of the policy. 
Either a single policy for\n the standard MDP evaluation, or a pair of a deterministic \n policy and a randomized policy of the nature\n\\param init Initial distribution (alpha)\n\\param discount Discount factor (gamma)\n\\param policies The policy (indvec) or the pair of the policy and the policy\n of nature (pair >). The nature is typically \n a randomized policy\n*/\ntemplate\ninline numvec \noccfreq_mat(const GRMDP& rmdp, const Transition& init, prec_t discount,\n const Policies& policies) {\n const auto n = rmdp.state_count();\n\n // initial distribution\n const numvec& ivec = init.probabilities_vector(n);\n const VectorXd initial_vec = Map(ivec.data(),ivec.size());\n\n // get transition matrix and construct (I - gamma * P^T)\n MatrixXd t_mat = MatrixXd::Identity(n,n) - discount * transition_mat(rmdp, policies, true);\n\n // solve set of linear equations\n numvec result(n,0);\n Map(result.data(),result.size()) = HouseholderQR(t_mat).solve(initial_vec);\n\n return result;\n}\n\n}}\n", "meta": {"hexsha": "02b75a82706ec586d3287a70158c77cff098e73e", "size": 5149, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "craam/algorithms/occupancies.hpp", "max_stars_repo_name": "marekpetrik/CRAAM", "max_stars_repo_head_hexsha": "62cc392e876b5383faa5cb15ab1f6b70b26ff395", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22.0, "max_stars_repo_stars_event_min_datetime": "2015-09-28T14:41:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-03T00:16:19.000Z", "max_issues_repo_path": "craam/algorithms/occupancies.hpp", "max_issues_repo_name": "marekpetrik/CRAAM", "max_issues_repo_head_hexsha": "62cc392e876b5383faa5cb15ab1f6b70b26ff395", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6.0, "max_issues_repo_issues_event_min_datetime": "2017-08-10T18:35:40.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-13T01:38:04.000Z", "max_forks_repo_path": "craam/algorithms/occupancies.hpp", "max_forks_repo_name": "marekpetrik/CRAAM", 
"max_forks_repo_head_hexsha": "62cc392e876b5383faa5cb15ab1f6b70b26ff395", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2016-09-19T18:31:07.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-05T08:59:45.000Z", "avg_line_length": 36.006993007, "max_line_length": 121, "alphanum_fraction": 0.6879005632, "num_tokens": 1154, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7606506635289836, "lm_q2_score": 0.6548947155710233, "lm_q1q2_score": 0.49814609994072384}} {"text": "////////////////////////////////////////////////////////////////////////////////\n/// Copyright 2018-present Xinyan DAI\n///\n/// permission is hereby granted, free of charge, to any person obtaining a copy\n/// of this software and associated documentation files (the \"Software\"), to\n/// deal in the Software without restriction, including without limitation the\n/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n/// sell copies of the Software, and to permit persons to whom the Software is\n/// furnished to do so, subject to the following conditions:\n///\n/// The above copyright notice and this permission notice shall be included in\n/// all copies or substantial portions ofthe Software.\n///\n/// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n/// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n/// IN THE SOFTWARE.\n\n/// @version 0.1\n/// @author Xinyan DAI\n/// @contact xinyan.dai@outlook.com\n//////////////////////////////////////////////////////////////////////////////\n#pragma once\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \"index.hpp\"\n\nnamespace ss {\n template\n class Node {\n protected:\n DataType _medians;\n vector _projector;\n vector _idx;\n Node *_left;\n Node *_right;\n const u_int _depth;\n const u_int _dim;\n const u_int _max_depth;\n\n public:\n explicit Node(u_int dim, u_int depth, u_int max_depth) :\n _depth(depth), _max_depth(max_depth), _dim(dim), _projector(dim) {\n }\n\n bool IsLeaf() {\n return this->_depth == this->_max_depth;\n }\n\n void _GenerateProjector(const Matrix &data, const vector &idx) {\n // TODO(Xinyan) choose the eigenvector as random projection vector\n std::default_random_engine generator;\n std::normal_distribution distribution(0.0, 1.0);\n\n for (int j=0; j<_dim; j++) {\n this->_projector[j] = distribution(generator);\n }\n }\n\n void MakeTree(const Matrix &data, const vector & idx) {\n if (IsLeaf()) {\n _idx = idx;\n return;\n }\n int N = idx.size();\n this->_GenerateProjector(data, idx);\n vector projected_value(idx.size());\n for (int i = 0; i < idx.size(); ++i) {\n projected_value[i] = ss::InnerProduct(data[idx[i]], _projector.data(), _projector.size());\n }\n vector sorted_idx = ss::SortIndexes(projected_value);\n vector sorted_left_inx = vector(sorted_idx.begin(), sorted_idx.begin() + N/2);\n vector sorted_right_inx = vector(sorted_idx.begin() + N/2, sorted_idx.end());\n\n this->_medians = (projected_value[sorted_idx[N/2-1]]+ projected_value[sorted_idx[N/2+1]]) / 2.0;\n\n this->_left = new Node(_dim, 
_depth + 1, _max_depth);\n this->_right = new Node(_dim, _depth + 1, _max_depth);\n\n this->_left->MakeTree(data, ss::FancyIndex(idx, sorted_left_inx));\n this->_right->MakeTree(data, ss::FancyIndex(idx, sorted_right_inx));\n }\n\n const vector& ProbeLeaf(const DataType * query) {\n if (IsLeaf()) {\n return this->_idx;\n }\n DataType projected_value= ss::InnerProduct(query, _projector.data(), _projector.size());\n if (projected_value < _medians) {\n return this->_left->ProbeLeaf(query);\n } else {\n return this->_right->ProbeLeaf(query);\n }\n }\n\n void LeafIdx(vector * > & leafIdx) {\n if (IsLeaf()) {\n leafIdx.push_back(&this->_idx);\n } else {\n\n this->_left->LeafIdx(leafIdx);\n this->_right->LeafIdx(leafIdx);\n }\n }\n\n };\n\n template\n class RPTIndex: public Index {\n protected:\n Node _root;\n public:\n explicit RPTIndex(const parameter & para) :\n Index (para),\n _root(para.dim, 0, para.num_bit) {\n }\n\n ~RPTIndex() {}\n\n void Train(const Matrix & data) override {\n this->_root.MakeTree(data, ss::Range(0, data.getSize()));\n\n }\n\n void Add(const Matrix &data) override {\n }\n\n vector * > LeafIdx() {\n vector * > leafIdx;\n leafIdx.reserve(1 << this->_para.num_bit);\n this->_root.LeafIdx(leafIdx);\n return leafIdx;\n }\n\n void Search(const DataType* query, const std::function& prober) override {\n const vector& idx = this->_root.ProbeLeaf(query);\n for (int id : idx) {\n prober(id);\n }\n }\n };\n} // namespace ss\n\n\n// ------------------------- implementation -------------------------\n\n", "meta": {"hexsha": "4b33fe4081d9ac97d0bf573d25cf3513eb6d8f14", "size": 5794, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/include/index/rptree.hpp", "max_stars_repo_name": "xinyandai/similarity-search", "max_stars_repo_head_hexsha": "75dc71abdd7f79094475db734fe55d04358363fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16.0, "max_stars_repo_stars_event_min_datetime": "2018-11-17T00:51:26.000Z", "max_stars_repo_stars_event_max_datetime": 
"2022-03-10T22:51:56.000Z", "max_issues_repo_path": "src/include/index/rptree.hpp", "max_issues_repo_name": "xinyandai/similarity-search", "max_issues_repo_head_hexsha": "75dc71abdd7f79094475db734fe55d04358363fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/include/index/rptree.hpp", "max_forks_repo_name": "xinyandai/similarity-search", "max_forks_repo_head_hexsha": "75dc71abdd7f79094475db734fe55d04358363fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9.0, "max_forks_repo_forks_event_min_datetime": "2018-11-14T08:08:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-08T02:42:58.000Z", "avg_line_length": 36.6708860759, "max_line_length": 108, "alphanum_fraction": 0.5495340007, "num_tokens": 1250, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. YES", "lm_q1_score": 0.7606506526772883, "lm_q2_score": 0.6548947223065755, "lm_q1q2_score": 0.4981460979574081}} {"text": "/*\n For more information, please see: http://software.sci.utah.edu\n \n The MIT License\n \n Copyright (c) 2015 Scientific Computing and Imaging Institute,\n University of Utah.\n \n \n Permission is hereby granted, free of charge, to any person obtaining a\n copy of this software and associated documentation files (the \"Software\"),\n to deal in the Software without restriction, including without limitation\n the rights to use, copy, modify, merge, publish, distribute, sublicense,\n and/or sell copies of the Software, and to permit persons to whom the\n Software is furnished to do so, subject to the following conditions:\n \n The above copyright notice and this permission notice shall be included\n in all copies or substantial portions of the Software.\n \n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A 
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n DEALINGS IN THE SOFTWARE.\n*/\n\n#include \n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nusing namespace boost::assign; \nusing namespace SCIRun;\nusing namespace SCIRun::TestUtils;\n\nTEST(InvertMatrixTest, InvertZeroException)\n{\n const int dim = 3;\n DenseMatrix zero_matrix(dim, dim);\n zero_matrix.zero();\n \n EXPECT_THROW(lapackinvert(zero_matrix.get_data_pointer(), dim), LapackError);\n}\n\nTEST(InvertMatrixTest, InvertArgException)\n{\n const int dim = 5;\n const int bad_dim = 4;\n DenseMatrixHandle identity(DenseMatrix::identity(dim));\n DenseMatrixHandle identityOriginal(DenseMatrix::identity(dim));\n \n EXPECT_THROW(lapackinvert(identity->get_data_pointer(), bad_dim), LapackError);\n \n EXPECT_FALSE(compare_exactly(*identity, *identityOriginal));\n}\n\nTEST(SolveLinSysWithLapackTest, SolveLinSysZeroException)\n{\n const int dim = 6;\n DenseMatrix zero_matrix(dim, dim);\n zero_matrix.zero();\n ColumnMatrix rhs = MAKE_COLUMN_MATRIX((107) (60) (71) (43) (82));\n\n EXPECT_THROW(lapacksolvelinearsystem(zero_matrix.get_raw_2D_pointer(), dim, dim,\n rhs.get_data_pointer(), dim, 1),\n LapackError);\n}\n\nTEST(InvertMatrixTest, CanInvertIdentity)\n{\n const int dim = 3;\n DenseMatrixHandle identity(DenseMatrix::identity(dim));\n DenseMatrixHandle identityOriginal(DenseMatrix::identity(dim));\n \n EXPECT_NO_THROW(lapackinvert(identity->get_data_pointer(), dim));\n \n EXPECT_TRUE(compare_exactly(*identity, *identityOriginal));\n}\n\nTEST(SolveLinSysWithLapackTest, SolvingSimpleCase)\n{\n DenseMatrix M = MAKE_DENSE_MATRIX(\n (2, 4, 7, 9, 8)\n (6, 9, 2, 5, 2)\n (6, 3, 5, 1, 8)\n (1, 5, 6, 1, 2)\n (1, 2, 8, 2, 9));\n \n 
ColumnMatrix rhs = MAKE_COLUMN_MATRIX((107) (60) (71) (43) (82));\n \n EXPECT_NO_THROW(lapacksolvelinearsystem(M.get_raw_2D_pointer(), 5, 5, rhs.get_data_pointer(), 5, 1));\n \n EXPECT_COLUMN_MATRIX_EQ_TO(rhs,\n (1.0)\n (2.0)\n (3.0)\n (4.0)\n (5.0));\n\n}\n\nTEST(InvertMatrixTest, CanInvertWithMemberFunction)\n{\n const int rows = 3, cols = 3;\n\n DenseMatrix m = MAKE_DENSE_MATRIX(\n (1, 0, 1)\n (0, 2, 0)\n (0, 0, -1));\n\n const MatrixHandle original(m.clone());\n //std::cout << \"Matrix:\" << std::endl;\n //std::cout << matrix_to_string(m) << std::endl;\n\n EXPECT_TRUE(m.invert());\n const MatrixHandle inverseFromMethod(m.clone());\n //std::cout << \"Inverse from method:\" << std::endl;\n //std::cout << to_string(inverseFromMethod) << std::endl;\n\n EXPECT_TRUE(m.invert());\n //std::cout << \"Back to original matrix:\" << std::endl;\n //std::cout << matrix_to_string(m) << std::endl;\n\n //std::cout << \"Inversion via direct call to lapack:\" << std::endl;\n\n EXPECT_NO_THROW(lapackinvert(m.get_data_pointer(), rows));\n\n //std::cout << matrix_to_string(m) << std::endl;\n {\n const MatrixHandle inverseFromDirectLapack(m.clone());\n\n //std::cout << \"Difference matrix:\" << std::endl;\n MatrixHandle diff = inverseFromDirectLapack - inverseFromMethod;\n //std::cout << to_string(diff) << std::endl;\n\n DenseMatrixHandle zero(DenseMatrix::zero_matrix(rows, cols));\n\n EXPECT_TRUE(compare_exactly(*diff, *zero));\n }\n\n MatrixHandle id3(DenseMatrix::identity(rows));\n MatrixHandle product = inverseFromMethod * original;\n EXPECT_TRUE(compare_exactly(*id3, *product));\n}\n\n//Note: this test can be a template for further lapack function testing.\nTEST(SVDTest, ExampleFromWikiPage)\n{\n const int rows = 4, cols = 5;\n\n DenseMatrix m = MAKE_DENSE_MATRIX(\n (1, 0, 0, 0, 2)\n (0, 0, 3, 0, 0)\n (0, 0, 0, 0, 0)\n (0, 4, 0, 0, 0));\n\n DenseMatrix u(rows, rows);\n ColumnMatrix s(rows);\n DenseMatrix v_transpose(cols, cols);\n\n 
EXPECT_NO_THROW(lapacksvd(m.get_raw_2D_pointer(), rows, cols,\n s.get_data_pointer(),\n u.get_raw_2D_pointer(),\n v_transpose.get_raw_2D_pointer()));\n\n EXPECT_MATRIX_EQ_TO(u, \n (0,0,1,0)\n (0,1,0,0)\n (0,0,0,-1)\n (1,0,0,0));\n\n EXPECT_COLUMN_MATRIX_EQ_TO(s,\n (4.0)\n (3.0)\n (2.23606798)\n (0));\n\n EXPECT_MATRIX_EQ_TO(v_transpose,\n (0.0,1.0,0.0,0.0,0.0)\n (0, 0, 1, 0, 0)\n (sqrt(0.2), 0, 0, 0, sqrt(0.8))\n (0, 0, 0, 1, 0)\n (-sqrt(0.8), 0, 0, 0, sqrt(0.2)));\n\n MatrixHandle U(u.clone());\n MatrixHandle fullS(DenseMatrix::make_diagonal_from_column(s, rows, cols));\n MatrixHandle V_transpose(v_transpose.clone());\n\n EXPECT_MATRIX_EQ(m, *(U * fullS * V_transpose));\n}\n\n", "meta": {"hexsha": "624f5728a21e054986390d8c4b65516e4181d25f", "size": 5979, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/Core/Math/Tests/LapackWrapperTests.cc", "max_stars_repo_name": "mhansen1/SCIRun", "max_stars_repo_head_hexsha": "9719c570a6d6911a9eb8df584bd2c4ad8b8cd2ba", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Core/Math/Tests/LapackWrapperTests.cc", "max_issues_repo_name": "mhansen1/SCIRun", "max_issues_repo_head_hexsha": "9719c570a6d6911a9eb8df584bd2c4ad8b8cd2ba", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Core/Math/Tests/LapackWrapperTests.cc", "max_forks_repo_name": "mhansen1/SCIRun", "max_forks_repo_head_hexsha": "9719c570a6d6911a9eb8df584bd2c4ad8b8cd2ba", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4532019704, "max_line_length": 105, "alphanum_fraction": 0.6872386687, "num_tokens": 1711, "lm_name": "Qwen/Qwen-72B", 
"lm_label": "1. YES\n2. YES", "lm_q1_score": 0.6548947290421275, "lm_q2_score": 0.7606506418255928, "lm_q1q2_score": 0.49814609597409204}} {"text": "// (C) Copyright John Maddock 2006.\n// Use, modification and distribution are subject to the\n// Boost Software License, Version 1.0. (See accompanying file\n// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n\n#define BOOST_UBLAS_TYPE_CHECK_EPSILON (type_traits::type_sqrt (boost::math::tools::epsilon ()))\n#define BOOST_UBLAS_TYPE_CHECK_MIN (type_traits::type_sqrt ( boost::math::tools::min_value()))\n#define BOOST_UBLAS_NDEBUG\n\n#include \nnamespace std{\nusing boost::math::ntl::pow;\n} // workaround for spirit parser.\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include // for test_main\n\nextern boost::math::ntl::RR f(const boost::math::ntl::RR& x, int variant);\nextern void show_extra(\n const boost::math::tools::polynomial& n, \n const boost::math::tools::polynomial& d, \n const boost::math::ntl::RR& x_offset, \n const boost::math::ntl::RR& y_offset, \n int variant);\n\nusing namespace boost::spirit::classic;\n\nboost::math::ntl::RR a(0), b(1); // range to optimise over\nbool rel_error(true);\nbool pin(false);\nint orderN(3);\nint orderD(1);\nint target_precision = boost::math::tools::digits();\nint working_precision = target_precision * 2;\nbool started(false);\nint variant(0);\nint skew(0);\nint brake(50);\nboost::math::ntl::RR x_offset(0), y_offset(0), x_scale(1);\nbool auto_offset_y;\n\nboost::shared_ptr > p_remez;\n\nboost::math::ntl::RR the_function(const boost::math::ntl::RR& val)\n{\n return f(x_scale * (val + x_offset), variant) + y_offset;\n}\n\nvoid step_some(unsigned count)\n{\n try{\n NTL::RR::SetPrecision(working_precision);\n if(!started)\n {\n //\n // If we have an automatic y-offset calculate it now:\n //\n if(auto_offset_y)\n {\n boost::math::ntl::RR fa, fb, fm;\n fa = f(x_scale * (a + x_offset), variant);\n fb = 
f(x_scale * (b + x_offset), variant);\n fm = f(x_scale * ((a+b)/2 + x_offset), variant);\n y_offset = -(fa + fb + fm) / 3;\n NTL::RR::SetOutputPrecision(5);\n std::cout << \"Setting auto-y-offset to \" << y_offset << std::endl;\n }\n //\n // Truncate offsets to float precision:\n //\n x_offset = NTL::RoundToPrecision(x_offset.value(), 20);\n y_offset = NTL::RoundToPrecision(y_offset.value(), 20);\n //\n // Construct new Remez state machine:\n //\n p_remez.reset(new boost::math::tools::remez_minimax(\n &the_function, \n orderN, orderD, \n a, b, \n pin, \n rel_error, \n skew, \n working_precision));\n std::cout << \"Max error in interpolated form: \" << std::setprecision(3) << std::scientific << boost::math::tools::real_cast(p_remez->max_error()) << std::endl;\n //\n // Signal that we've started:\n //\n started = true;\n }\n unsigned i;\n for(i = 0; i < count; ++i)\n {\n std::cout << \"Stepping...\" << std::endl;\n p_remez->set_brake(brake);\n boost::math::ntl::RR r = p_remez->iterate();\n NTL::RR::SetOutputPrecision(3);\n std::cout \n << \"Maximum Deviation Found: \" << std::setprecision(3) << std::scientific << boost::math::tools::real_cast(p_remez->max_error()) << std::endl\n << \"Expected Error Term: \" << std::setprecision(3) << std::scientific << boost::math::tools::real_cast(p_remez->error_term()) << std::endl\n << \"Maximum Relative Change in Control Points: \" << std::setprecision(3) << std::scientific << boost::math::tools::real_cast(r) << std::endl;\n }\n }\n catch(const std::exception& e)\n {\n std::cout << \"Step failed with exception: \" << e.what() << std::endl;\n }\n}\n\nvoid step(const char*, const char*)\n{\n step_some(1);\n}\n\nvoid show(const char*, const char*)\n{\n NTL::RR::SetPrecision(working_precision);\n if(started)\n {\n boost::math::tools::polynomial n = p_remez->numerator();\n boost::math::tools::polynomial d = p_remez->denominator();\n std::vector cn = n.chebyshev();\n std::vector cd = d.chebyshev();\n int prec = 2 + (target_precision * 
3010LL)/10000;\n std::cout << std::scientific << std::setprecision(prec);\n NTL::RR::SetOutputPrecision(prec);\n boost::numeric::ublas::vector v = p_remez->zero_points();\n \n std::cout << \" Zeros = {\\n\";\n unsigned i;\n for(i = 0; i < v.size(); ++i)\n {\n std::cout << \" \" << v[i] << std::endl;\n }\n std::cout << \" }\\n\";\n\n v = p_remez->chebyshev_points();\n std::cout << \" Chebeshev Control Points = {\\n\";\n for(i = 0; i < v.size(); ++i)\n {\n std::cout << \" \" << v[i] << std::endl;\n }\n std::cout << \" }\\n\";\n\n std::cout << \"X offset: \" << x_offset << std::endl;\n std::cout << \"X scale: \" << x_scale << std::endl;\n std::cout << \"Y offset: \" << y_offset << std::endl;\n\n std::cout << \"P = {\";\n for(i = 0; i < n.size(); ++i)\n {\n std::cout << \" \" << n[i] << \"L,\" << std::endl;\n }\n std::cout << \" }\\n\";\n\n std::cout << \"Q = {\";\n for(i = 0; i < d.size(); ++i)\n {\n std::cout << \" \" << d[i] << \"L,\" << std::endl;\n }\n std::cout << \" }\\n\";\n\n std::cout << \"CP = {\";\n for(i = 0; i < cn.size(); ++i)\n {\n std::cout << \" \" << cn[i] << \"L,\" << std::endl;\n }\n std::cout << \" }\\n\";\n\n std::cout << \"CQ = {\";\n for(i = 0; i < cd.size(); ++i)\n {\n std::cout << \" \" << cd[i] << \"L,\" << std::endl;\n }\n std::cout << \" }\\n\";\n\n show_extra(n, d, x_offset, y_offset, variant);\n }\n else\n {\n std::cerr << \"Nothing to display\" << std::endl;\n }\n}\n\nvoid do_graph(unsigned points)\n{\n NTL::RR::SetPrecision(working_precision);\n boost::math::ntl::RR step = (b - a) / (points - 1);\n boost::math::ntl::RR x = a;\n while(points > 1)\n {\n NTL::RR::SetOutputPrecision(10);\n std::cout << std::setprecision(10) << std::setw(30) << std::left \n << boost::lexical_cast(x) << the_function(x) << std::endl;\n --points;\n x += step;\n }\n std::cout << std::setprecision(10) << std::setw(30) << std::left \n << boost::lexical_cast(b) << the_function(b) << std::endl;\n}\n\nvoid graph(const char*, const char*)\n{\n 
do_graph(3);\n}\n\ntemplate \nvoid do_test(T, const char* name)\n{\n boost::math::ntl::RR::SetPrecision(working_precision);\n if(started)\n {\n //\n // We want to test the approximation at fixed precision:\n // either float, double or long double. Begin by getting the\n // polynomials:\n //\n boost::math::tools::polynomial n, d;\n boost::math::tools::polynomial nr, dr;\n nr = p_remez->numerator();\n dr = p_remez->denominator();\n n = nr;\n d = dr;\n\n std::vector cn1, cd1;\n cn1 = nr.chebyshev();\n cd1 = dr.chebyshev();\n std::vector cn, cd;\n for(unsigned i = 0; i < cn1.size(); ++i)\n {\n cn.push_back(boost::math::tools::real_cast(cn1[i]));\n }\n for(unsigned i = 0; i < cd1.size(); ++i)\n {\n cd.push_back(boost::math::tools::real_cast(cd1[i]));\n }\n //\n // We'll test at the Chebeshev control points which is where\n // (in theory) the largest deviation should occur. For good\n // measure we'll test at the zeros as well:\n //\n boost::numeric::ublas::vector \n zeros(p_remez->zero_points()),\n cheb(p_remez->chebyshev_points());\n\n boost::math::ntl::RR max_error(0), cheb_max_error(0);\n\n //\n // Do the tests at the zeros:\n //\n std::cout << \"Starting tests at \" << name << \" precision...\\n\";\n std::cout << \"Absissa Error (Poly) Error (Cheb)\\n\";\n for(unsigned i = 0; i < zeros.size(); ++i)\n {\n boost::math::ntl::RR true_result = the_function(zeros[i]);\n T absissa = boost::math::tools::real_cast(zeros[i]);\n boost::math::ntl::RR test_result = n.evaluate(absissa) / d.evaluate(absissa);\n boost::math::ntl::RR cheb_result = boost::math::tools::evaluate_chebyshev(cn, absissa) / boost::math::tools::evaluate_chebyshev(cd, absissa);\n boost::math::ntl::RR err, cheb_err;\n if(rel_error)\n {\n err = boost::math::tools::relative_error(test_result, true_result);\n cheb_err = boost::math::tools::relative_error(cheb_result, true_result);\n }\n else\n {\n err = fabs(test_result - true_result);\n cheb_err = fabs(cheb_result - true_result);\n }\n if(err > max_error)\n 
max_error = err;\n if(cheb_err > cheb_max_error)\n cheb_max_error = cheb_err;\n std::cout << std::setprecision(6) << std::setw(15) << std::left << absissa\n << std::setw(15) << std::left << boost::math::tools::real_cast(err) << boost::math::tools::real_cast(cheb_err) << std::endl;\n }\n //\n // Do the tests at the Chebeshev control points:\n //\n for(unsigned i = 0; i < cheb.size(); ++i)\n {\n boost::math::ntl::RR true_result = the_function(cheb[i]);\n T absissa = boost::math::tools::real_cast(cheb[i]);\n boost::math::ntl::RR test_result = n.evaluate(absissa) / d.evaluate(absissa);\n boost::math::ntl::RR cheb_result = boost::math::tools::evaluate_chebyshev(cn, absissa) / boost::math::tools::evaluate_chebyshev(cd, absissa);\n boost::math::ntl::RR err, cheb_err;\n if(rel_error)\n {\n err = boost::math::tools::relative_error(test_result, true_result);\n cheb_err = boost::math::tools::relative_error(cheb_result, true_result);\n }\n else\n {\n err = fabs(test_result - true_result);\n cheb_err = fabs(cheb_result - true_result);\n }\n if(err > max_error)\n max_error = err;\n std::cout << std::setprecision(6) << std::setw(15) << std::left << absissa\n << std::setw(15) << std::left << boost::math::tools::real_cast(err) << \n boost::math::tools::real_cast(cheb_err) << std::endl;\n }\n std::string msg = \"Max Error found at \";\n msg += name;\n msg += \" precision = \";\n msg.append(62 - 17 - msg.size(), ' ');\n std::cout << msg << std::setprecision(6) << \"Poly: \" << std::setw(20) << std::left\n << boost::math::tools::real_cast(max_error) << \"Cheb: \" << boost::math::tools::real_cast(cheb_max_error) << std::endl;\n }\n else\n {\n std::cout << \"Nothing to test: try converging an approximation first!!!\" << std::endl;\n }\n}\n\nvoid test_float(const char*, const char*)\n{\n do_test(float(0), \"float\");\n}\n\nvoid test_double(const char*, const char*)\n{\n do_test(double(0), \"double\");\n}\n\nvoid test_long(const char*, const char*)\n{\n do_test((long double)(0), \"long 
double\");\n}\n\nvoid test_all(const char*, const char*)\n{\n do_test(float(0), \"float\");\n do_test(double(0), \"double\");\n do_test((long double)(0), \"long double\");\n}\n\ntemplate \nvoid do_test_n(T, const char* name, unsigned count)\n{\n boost::math::ntl::RR::SetPrecision(working_precision);\n if(started)\n {\n //\n // We want to test the approximation at fixed precision:\n // either float, double or long double. Begin by getting the\n // polynomials:\n //\n boost::math::tools::polynomial n, d;\n boost::math::tools::polynomial nr, dr;\n nr = p_remez->numerator();\n dr = p_remez->denominator();\n n = nr;\n d = dr;\n\n std::vector cn1, cd1;\n cn1 = nr.chebyshev();\n cd1 = dr.chebyshev();\n std::vector cn, cd;\n for(unsigned i = 0; i < cn1.size(); ++i)\n {\n cn.push_back(boost::math::tools::real_cast(cn1[i]));\n }\n for(unsigned i = 0; i < cd1.size(); ++i)\n {\n cd.push_back(boost::math::tools::real_cast(cd1[i]));\n }\n\n boost::math::ntl::RR max_error(0), max_cheb_error(0);\n boost::math::ntl::RR step = (b - a) / count;\n\n //\n // Do the tests at the zeros:\n //\n std::cout << \"Starting tests at \" << name << \" precision...\\n\";\n std::cout << \"Absissa Error (poly) Error (Cheb)\\n\";\n for(boost::math::ntl::RR x = a; x <= b; x += step)\n {\n boost::math::ntl::RR true_result = the_function(x);\n T absissa = boost::math::tools::real_cast(x);\n boost::math::ntl::RR test_result = n.evaluate(absissa) / d.evaluate(absissa);\n boost::math::ntl::RR cheb_result = boost::math::tools::evaluate_chebyshev(cn, absissa) / boost::math::tools::evaluate_chebyshev(cd, absissa);\n boost::math::ntl::RR err, cheb_err;\n if(rel_error)\n {\n err = boost::math::tools::relative_error(test_result, true_result);\n cheb_err = boost::math::tools::relative_error(cheb_result, true_result);\n }\n else\n {\n err = fabs(test_result - true_result);\n cheb_err = fabs(cheb_result - true_result);\n }\n if(err > max_error)\n max_error = err;\n if(cheb_err > max_cheb_error)\n max_cheb_error = 
cheb_err;\n std::cout << std::setprecision(6) << std::setw(15) << std::left << boost::math::tools::real_cast(absissa)\n << (test_result < true_result ? \"-\" : \"\") << std::setw(20) << std::left \n << boost::math::tools::real_cast(err) \n << boost::math::tools::real_cast(cheb_err) << std::endl;\n }\n std::string msg = \"Max Error found at \";\n msg += name;\n msg += \" precision = \";\n //msg.append(62 - 17 - msg.size(), ' ');\n std::cout << msg << \"Poly: \" << std::setprecision(6) \n //<< std::setw(15) << std::left \n << boost::math::tools::real_cast(max_error) \n << \" Cheb: \" << boost::math::tools::real_cast(max_cheb_error) << std::endl;\n }\n else\n {\n std::cout << \"Nothing to test: try converging an approximation first!!!\" << std::endl;\n }\n}\n\nvoid test_n(unsigned n)\n{\n do_test_n(boost::math::ntl::RR(), \"boost::math::ntl::RR\", n);\n}\n\nvoid test_float_n(unsigned n)\n{\n do_test_n(float(0), \"float\", n);\n}\n\nvoid test_double_n(unsigned n)\n{\n do_test_n(double(0), \"double\", n);\n}\n\nvoid test_long_n(unsigned n)\n{\n do_test_n((long double)(0), \"long double\", n);\n}\n\nvoid rotate(const char*, const char*)\n{\n if(p_remez)\n {\n p_remez->rotate();\n }\n else\n {\n std::cerr << \"Nothing to rotate\" << std::endl;\n }\n}\n\nvoid rescale(const char*, const char*)\n{\n if(p_remez)\n {\n p_remez->rescale(a, b);\n }\n else\n {\n std::cerr << \"Nothing to rescale\" << std::endl;\n }\n}\n\nvoid graph_poly(const char*, const char*)\n{\n int i = 50;\n boost::math::ntl::RR::SetPrecision(working_precision);\n if(started)\n {\n //\n // We want to test the approximation at fixed precision:\n // either float, double or long double. 
Begin by getting the\n // polynomials:\n //\n boost::math::tools::polynomial n, d;\n n = p_remez->numerator();\n d = p_remez->denominator();\n\n boost::math::ntl::RR max_error(0);\n boost::math::ntl::RR step = (b - a) / i;\n\n std::cout << \"Evaluating Numerator...\\n\";\n boost::math::ntl::RR val;\n for(val = a; val <= b; val += step)\n std::cout << n.evaluate(val) << std::endl;\n std::cout << \"Evaluating Denominator...\\n\";\n for(val = a; val <= b; val += step)\n std::cout << d.evaluate(val) << std::endl;\n }\n else\n {\n std::cout << \"Nothing to test: try converging an approximation first!!!\" << std::endl;\n }\n}\n\nint test_main(int, char* [])\n{\n std::string line;\n real_parser const rr_p;\n while(std::getline(std::cin, line))\n {\n if(parse(line.c_str(), str_p(\"quit\"), space_p).full)\n return 0;\n if(false == parse(line.c_str(), \n (\n\n str_p(\"range\")[assign_a(started, false)] && real_p[assign_a(a)] && real_p[assign_a(b)]\n ||\n str_p(\"relative\")[assign_a(started, false)][assign_a(rel_error, true)]\n ||\n str_p(\"absolute\")[assign_a(started, false)][assign_a(rel_error, false)]\n ||\n str_p(\"pin\")[assign_a(started, false)] && str_p(\"true\")[assign_a(pin, true)]\n ||\n str_p(\"pin\")[assign_a(started, false)] && str_p(\"false\")[assign_a(pin, false)]\n ||\n str_p(\"pin\")[assign_a(started, false)] && str_p(\"1\")[assign_a(pin, true)]\n ||\n str_p(\"pin\")[assign_a(started, false)] && str_p(\"0\")[assign_a(pin, false)]\n ||\n str_p(\"pin\")[assign_a(started, false)][assign_a(pin, true)]\n ||\n str_p(\"order\")[assign_a(started, false)] && uint_p[assign_a(orderN)] && uint_p[assign_a(orderD)]\n ||\n str_p(\"order\")[assign_a(started, false)] && uint_p[assign_a(orderN)]\n ||\n str_p(\"target-precision\") && uint_p[assign_a(target_precision)]\n ||\n str_p(\"working-precision\")[assign_a(started, false)] && uint_p[assign_a(working_precision)]\n ||\n str_p(\"variant\")[assign_a(started, false)] && int_p[assign_a(variant)]\n ||\n 
str_p(\"skew\")[assign_a(started, false)] && int_p[assign_a(skew)]\n ||\n str_p(\"brake\") && int_p[assign_a(brake)]\n ||\n str_p(\"step\") && int_p[&step_some]\n ||\n str_p(\"step\")[&step]\n ||\n str_p(\"poly\")[&graph_poly]\n ||\n str_p(\"info\")[&show]\n ||\n str_p(\"graph\") && uint_p[&do_graph]\n ||\n str_p(\"graph\")[&graph]\n ||\n str_p(\"x-offset\") && real_p[assign_a(x_offset)]\n ||\n str_p(\"x-scale\") && real_p[assign_a(x_scale)]\n ||\n str_p(\"y-offset\") && str_p(\"auto\")[assign_a(auto_offset_y, true)]\n ||\n str_p(\"y-offset\") && real_p[assign_a(y_offset)][assign_a(auto_offset_y, false)]\n ||\n str_p(\"test\") && str_p(\"float\") && uint_p[&test_float_n]\n ||\n str_p(\"test\") && str_p(\"float\")[&test_float]\n ||\n str_p(\"test\") && str_p(\"double\") && uint_p[&test_double_n]\n ||\n str_p(\"test\") && str_p(\"double\")[&test_double]\n ||\n str_p(\"test\") && str_p(\"long\") && uint_p[&test_long_n]\n ||\n str_p(\"test\") && str_p(\"long\")[&test_long]\n ||\n str_p(\"test\") && str_p(\"all\")[&test_all]\n ||\n str_p(\"test\") && uint_p[&test_n]\n ||\n str_p(\"rotate\")[&rotate]\n ||\n str_p(\"rescale\") && real_p[assign_a(a)] && real_p[assign_a(b)] && epsilon_p[&rescale]\n\n ), space_p).full)\n {\n std::cout << \"Unable to parse directive: \\\"\" << line << \"\\\"\" << std::endl;\n }\n else\n {\n std::cout << \"Variant = \" << variant << std::endl;\n std::cout << \"range = [\" << a << \",\" << b << \"]\" << std::endl;\n std::cout << \"Relative Error = \" << rel_error << std::endl;\n std::cout << \"Pin to Origin = \" << pin << std::endl;\n std::cout << \"Order (Num/Denom) = \" << orderN << \"/\" << orderD << std::endl;\n std::cout << \"Target Precision = \" << target_precision << std::endl;\n std::cout << \"Working Precision = \" << working_precision << std::endl;\n std::cout << \"Skew = \" << skew << std::endl;\n std::cout << \"Brake = \" << brake << std::endl;\n std::cout << \"X Offset = \" << x_offset << std::endl;\n std::cout << \"X scale = \" 
<< x_scale << std::endl;\n std::cout << \"Y Offset = \";\n if(auto_offset_y)\n std::cout << \"Auto (\";\n std::cout << y_offset;\n if(auto_offset_y)\n std::cout << \")\";\n std::cout << std::endl;\n }\n }\n return 0;\n}\n", "meta": {"hexsha": "8d8ab324a89c3b7fa007c1fa022740f3f293d111", "size": 20787, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/math/minimax/main.cpp", "max_stars_repo_name": "ai-nikolaev/repo-cppboost", "max_stars_repo_head_hexsha": "218c4a977c6d8cd6f2864cdcea1b6ab53160d203", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 11.0, "max_stars_repo_stars_event_min_datetime": "2016-04-12T16:29:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-28T11:01:57.000Z", "max_issues_repo_path": "libs/math/minimax/main.cpp", "max_issues_repo_name": "ai-nikolaev/repo-cppboost", "max_issues_repo_head_hexsha": "218c4a977c6d8cd6f2864cdcea1b6ab53160d203", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2018-10-31T19:35:14.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-04T17:11:27.000Z", "max_forks_repo_path": "libs/math/minimax/main.cpp", "max_forks_repo_name": "ai-nikolaev/repo-cppboost", "max_forks_repo_head_hexsha": "218c4a977c6d8cd6f2864cdcea1b6ab53160d203", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 9.0, "max_forks_repo_forks_event_min_datetime": "2015-09-09T02:38:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-30T00:24:24.000Z", "avg_line_length": 34.1330049261, "max_line_length": 182, "alphanum_fraction": 0.5426468466, "num_tokens": 5826, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7606506418255927, "lm_q2_score": 0.6548947155710233, "lm_q1q2_score": 0.49814608572728786}} {"text": "/* ----------------------------------------------------------------------------\n\n * GTSAM Copyright 2010, Georgia Tech Research Corporation,\n * Atlanta, Georgia 30332-0415\n * All Rights Reserved\n * Authors: Frank Dellaert, et al. (see THANKS for the full author list)\n\n * See LICENSE for the license information\n\n * -------------------------------------------------------------------------- */\n\n/**\n * @file InitializePose3.h\n * @author Luca Carlone\n * @author Frank Dellaert\n * @date August, 2014\n */\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nusing namespace std;\n\nnamespace gtsam {\nnamespace InitializePose3 {\n\nstatic const Matrix I9 = I_9x9;\nstatic const Vector zero9 = Vector::Zero(9);\nstatic const Matrix zero33 = Z_3x3;\n\nstatic const Key keyAnchor = symbol('Z', 9999999);\n\n/* ************************************************************************* */\nGaussianFactorGraph buildLinearOrientationGraph(const NonlinearFactorGraph& g) {\n\n GaussianFactorGraph linearGraph;\n noiseModel::Unit::shared_ptr model = noiseModel::Unit::Create(9);\n\n for(const boost::shared_ptr& factor: g) {\n Matrix3 Rij;\n\n boost::shared_ptr > pose3Between =\n boost::dynamic_pointer_cast >(factor);\n if (pose3Between)\n Rij = pose3Between->measured().rotation().matrix();\n else\n std::cout << \"Error in buildLinearOrientationGraph\" << std::endl;\n\n const FastVector& keys = factor->keys();\n Key key1 = keys[0], key2 = keys[1];\n Matrix M9 = Z_9x9;\n M9.block(0,0,3,3) = Rij;\n M9.block(3,3,3,3) = Rij;\n M9.block(6,6,3,3) = Rij;\n linearGraph.add(key1, -I9, key2, M9, zero9, model);\n }\n // prior on the anchor orientation\n linearGraph.add(keyAnchor, I9, (Vector(9) << 1.0, 0.0, 0.0,/* */ 0.0, 1.0, 0.0, /* */ 0.0, 0.0, 1.0).finished(), model);\n return linearGraph;\n}\n\n/* 
************************************************************************* */\n// Transform VectorValues into valid Rot3\nValues normalizeRelaxedRotations(const VectorValues& relaxedRot3) {\n gttic(InitializePose3_computeOrientationsChordal);\n\n Matrix ppm = Z_3x3; // plus plus minus\n ppm(0,0) = 1; ppm(1,1) = 1; ppm(2,2) = -1;\n\n Values validRot3;\n for(const VectorValues::value_type& it: relaxedRot3) {\n Key key = it.first;\n if (key != keyAnchor) {\n const Vector& rotVector = it.second;\n Matrix3 rotMat;\n rotMat(0,0) = rotVector(0); rotMat(0,1) = rotVector(1); rotMat(0,2) = rotVector(2);\n rotMat(1,0) = rotVector(3); rotMat(1,1) = rotVector(4); rotMat(1,2) = rotVector(5);\n rotMat(2,0) = rotVector(6); rotMat(2,1) = rotVector(7); rotMat(2,2) = rotVector(8);\n\n Matrix U, V; Vector s;\n svd(rotMat, U, s, V);\n Matrix3 normalizedRotMat = U * V.transpose();\n\n // std::cout << \"rotMat \\n\" << rotMat << std::endl;\n // std::cout << \"U V' \\n\" << U * V.transpose() << std::endl;\n // std::cout << \"V \\n\" << V << std::endl;\n\n if(normalizedRotMat.determinant() < 0)\n normalizedRotMat = U * ppm * V.transpose();\n\n Rot3 initRot = Rot3(normalizedRotMat);\n validRot3.insert(key, initRot);\n }\n }\n return validRot3;\n}\n\n/* ************************************************************************* */\n// Select the subgraph of betweenFactors and transforms priors into between wrt a fictitious node\nNonlinearFactorGraph buildPose3graph(const NonlinearFactorGraph& graph) {\n gttic(InitializePose3_buildPose3graph);\n NonlinearFactorGraph pose3Graph;\n\n for(const boost::shared_ptr& factor: graph) {\n\n // recast to a between on Pose3\n boost::shared_ptr > pose3Between =\n boost::dynamic_pointer_cast >(factor);\n if (pose3Between)\n pose3Graph.add(pose3Between);\n\n // recast PriorFactor to BetweenFactor\n boost::shared_ptr > pose3Prior =\n boost::dynamic_pointer_cast >(factor);\n if (pose3Prior)\n pose3Graph.emplace_shared >(keyAnchor, pose3Prior->keys()[0],\n 
pose3Prior->prior(), pose3Prior->noiseModel());\n }\n return pose3Graph;\n}\n\n/* ************************************************************************* */\n// Return the orientations of a graph including only BetweenFactors\nValues computeOrientationsChordal(const NonlinearFactorGraph& pose3Graph) {\n gttic(InitializePose3_computeOrientationsChordal);\n\n // regularize measurements and plug everything in a factor graph\n GaussianFactorGraph relaxedGraph = buildLinearOrientationGraph(pose3Graph);\n\n // Solve the LFG\n VectorValues relaxedRot3 = relaxedGraph.optimize();\n\n // normalize and compute Rot3\n return normalizeRelaxedRotations(relaxedRot3);\n}\n\n/* ************************************************************************* */\n// Return the orientations of a graph including only BetweenFactors\nValues computeOrientationsGradient(const NonlinearFactorGraph& pose3Graph, const Values& givenGuess, const size_t maxIter, const bool setRefFrame) {\n gttic(InitializePose3_computeOrientationsGradient);\n\n // this works on the inverse rotations, according to Tron&Vidal,2011\n Values inverseRot;\n inverseRot.insert(keyAnchor, Rot3());\n for(const Values::ConstKeyValuePair& key_value: givenGuess) {\n Key key = key_value.key;\n const Pose3& pose = givenGuess.at(key);\n inverseRot.insert(key, pose.rotation().inverse());\n }\n\n // Create the map of edges incident on each node\n KeyVectorMap adjEdgesMap;\n KeyRotMap factorId2RotMap;\n\n createSymbolicGraph(adjEdgesMap, factorId2RotMap, pose3Graph);\n\n // calculate max node degree & allocate gradient\n size_t maxNodeDeg = 0;\n VectorValues grad;\n for(const Values::ConstKeyValuePair& key_value: inverseRot) {\n Key key = key_value.key;\n grad.insert(key,Vector3::Zero());\n size_t currNodeDeg = (adjEdgesMap.at(key)).size();\n if(currNodeDeg > maxNodeDeg)\n maxNodeDeg = currNodeDeg;\n }\n\n // Create parameters\n double b = 1;\n double f0 = 1/b - (1/b + M_PI) * exp(-b*M_PI);\n double a = (M_PI*M_PI)/(2*f0);\n double 
rho = 2*a*b;\n double mu_max = maxNodeDeg * rho;\n double stepsize = 2/mu_max; // = 1/(a b dG)\n\n std::cout <<\" b \" << b <<\" f0 \" << f0 <<\" a \" << a <<\" rho \" << rho <<\" stepsize \" << stepsize << \" maxNodeDeg \"<< maxNodeDeg << std::endl;\n double maxGrad;\n // gradient iterations\n size_t it;\n for(it=0; it < maxIter; it++){\n //////////////////////////////////////////////////////////////////////////\n // compute the gradient at each node\n //std::cout << \"it \" << it <<\" b \" << b <<\" f0 \" << f0 <<\" a \" << a\n // <<\" rho \" << rho <<\" stepsize \" << stepsize << \" maxNodeDeg \"<< maxNodeDeg << std::endl;\n maxGrad = 0;\n for(const Values::ConstKeyValuePair& key_value: inverseRot) {\n Key key = key_value.key;\n //std::cout << \"---------------------------key \" << DefaultKeyFormatter(key) << std::endl;\n Vector gradKey = Vector3::Zero();\n // collect the gradient for each edge incident on key\n for(const size_t& factorId: adjEdgesMap.at(key)){\n Rot3 Rij = factorId2RotMap.at(factorId);\n Rot3 Ri = inverseRot.at(key);\n if( key == (pose3Graph.at(factorId))->keys()[0] ){\n Key key1 = (pose3Graph.at(factorId))->keys()[1];\n Rot3 Rj = inverseRot.at(key1);\n gradKey = gradKey + gradientTron(Ri, Rij * Rj, a, b);\n //std::cout << \"key1 \" << DefaultKeyFormatter(key1) << \" gradientTron(Ri, Rij * Rj, a, b) \\n \" << gradientTron(Ri, Rij * Rj, a, b) << std::endl;\n }else if( key == (pose3Graph.at(factorId))->keys()[1] ){\n Key key0 = (pose3Graph.at(factorId))->keys()[0];\n Rot3 Rj = inverseRot.at(key0);\n gradKey = gradKey + gradientTron(Ri, Rij.between(Rj), a, b);\n //std::cout << \"key0 \" << DefaultKeyFormatter(key0) << \" gradientTron(Ri, Rij.inverse() * Rj, a, b) \\n \" << gradientTron(Ri, Rij.between(Rj), a, b) << std::endl;\n }else{\n std::cout << \"Error in gradient computation\" << std::endl;\n }\n } // end of i-th gradient computation\n grad.at(key) = stepsize * gradKey;\n\n double normGradKey = (gradKey).norm();\n //std::cout << \"key \" << 
DefaultKeyFormatter(key) <<\" \\n grad \\n\" << grad.at(key) << std::endl;\n if(normGradKey>maxGrad)\n maxGrad = normGradKey;\n } // end of loop over nodes\n\n //////////////////////////////////////////////////////////////////////////\n // update estimates\n inverseRot = inverseRot.retract(grad);\n\n //////////////////////////////////////////////////////////////////////////\n // check stopping condition\n if (it>20 && maxGrad < 5e-3)\n break;\n } // enf of gradient iterations\n\n std::cout << \"nr of gradient iterations \" << it << \"maxGrad \" << maxGrad << std::endl;\n\n // Return correct rotations\n const Rot3& Rref = inverseRot.at(keyAnchor); // This will be set to the identity as so far we included no prior\n Values estimateRot;\n for(const Values::ConstKeyValuePair& key_value: inverseRot) {\n Key key = key_value.key;\n if (key != keyAnchor) {\n const Rot3& R = inverseRot.at(key);\n if(setRefFrame)\n estimateRot.insert(key, Rref.compose(R.inverse()));\n else\n estimateRot.insert(key, R.inverse());\n }\n }\n return estimateRot;\n}\n\n/* ************************************************************************* */\nvoid createSymbolicGraph(KeyVectorMap& adjEdgesMap, KeyRotMap& factorId2RotMap, const NonlinearFactorGraph& pose3Graph){\n size_t factorId = 0;\n for(const boost::shared_ptr& factor: pose3Graph) {\n boost::shared_ptr > pose3Between =\n boost::dynamic_pointer_cast >(factor);\n if (pose3Between){\n Rot3 Rij = pose3Between->measured().rotation();\n factorId2RotMap.insert(pair(factorId,Rij));\n\n Key key1 = pose3Between->key1();\n if (adjEdgesMap.find(key1) != adjEdgesMap.end()){ // key is already in\n adjEdgesMap.at(key1).push_back(factorId);\n }else{\n vector edge_id;\n edge_id.push_back(factorId);\n adjEdgesMap.insert(pair >(key1, edge_id));\n }\n Key key2 = pose3Between->key2();\n if (adjEdgesMap.find(key2) != adjEdgesMap.end()){ // key is already in\n adjEdgesMap.at(key2).push_back(factorId);\n }else{\n vector edge_id;\n edge_id.push_back(factorId);\n 
adjEdgesMap.insert(pair >(key2, edge_id));\n }\n }else{\n std::cout << \"Error in computeOrientationsGradient\" << std::endl;\n }\n factorId++;\n }\n}\n\n/* ************************************************************************* */\nVector3 gradientTron(const Rot3& R1, const Rot3& R2, const double a, const double b) {\n Vector3 logRot = Rot3::Logmap(R1.between(R2));\n\n double th = logRot.norm();\n if(th != th){ // the second case means that th = nan (logRot does not work well for +/-pi)\n Rot3 R1pert = R1.compose( Rot3::Expmap(Vector3(0.01, 0.01, 0.01)) ); // some perturbation\n logRot = Rot3::Logmap(R1pert.between(R2));\n th = logRot.norm();\n }\n // exclude small or invalid rotations\n if (th > 1e-5 && th == th){ // nonzero valid rotations\n logRot = logRot / th;\n }else{\n logRot = Vector3::Zero();\n th = 0.0;\n }\n\n double fdot = a*b*th*exp(-b*th);\n return fdot*logRot;\n}\n\n/* ************************************************************************* */\nValues initializeOrientations(const NonlinearFactorGraph& graph) {\n\n // We \"extract\" the Pose3 subgraph of the original graph: this\n // is done to properly model priors and avoiding operating on a larger graph\n NonlinearFactorGraph pose3Graph = buildPose3graph(graph);\n\n // Get orientations from relative orientation measurements\n return computeOrientationsChordal(pose3Graph);\n}\n\n///* ************************************************************************* */\nValues computePoses(NonlinearFactorGraph& pose3graph, Values& initialRot) {\n gttic(InitializePose3_computePoses);\n\n // put into Values structure\n Values initialPose;\n for(const Values::ConstKeyValuePair& key_value: initialRot){\n Key key = key_value.key;\n const Rot3& rot = initialRot.at(key);\n Pose3 initializedPose = Pose3(rot, Point3(0, 0, 0));\n initialPose.insert(key, initializedPose);\n }\n // add prior\n noiseModel::Unit::shared_ptr priorModel = noiseModel::Unit::Create(6);\n initialPose.insert(keyAnchor, Pose3());\n 
pose3graph.emplace_shared >(keyAnchor, Pose3(), priorModel);\n\n // Create optimizer\n GaussNewtonParams params;\n bool singleIter = true;\n if(singleIter){\n params.maxIterations = 1;\n }else{\n std::cout << \" \\n\\n\\n\\n performing more than 1 GN iterations \\n\\n\\n\" <(key);\n estimate.insert(key, pose);\n }\n }\n return estimate;\n}\n\n/* ************************************************************************* */\nValues initialize(const NonlinearFactorGraph& graph) {\n gttic(InitializePose3_initialize);\n\n // We \"extract\" the Pose3 subgraph of the original graph: this\n // is done to properly model priors and avoiding operating on a larger graph\n NonlinearFactorGraph pose3Graph = buildPose3graph(graph);\n\n // Get orientations from relative orientation measurements\n Values valueRot3 = computeOrientationsChordal(pose3Graph);\n\n // Compute the full poses (1 GN iteration on full poses)\n return computePoses(pose3Graph, valueRot3);\n}\n\n/* ************************************************************************* */\nValues initialize(const NonlinearFactorGraph& graph, const Values& givenGuess, bool useGradient) {\n Values initialValues;\n\n // We \"extract\" the Pose3 subgraph of the original graph: this\n // is done to properly model priors and avoiding operating on a larger graph\n NonlinearFactorGraph pose3Graph = buildPose3graph(graph);\n\n // Get orientations from relative orientation measurements\n Values orientations;\n if(useGradient)\n orientations = computeOrientationsGradient(pose3Graph, givenGuess);\n else\n orientations = computeOrientationsChordal(pose3Graph);\n\n// orientations.print(\"orientations\\n\");\n\n // Compute the full poses (1 GN iteration on full poses)\n return computePoses(pose3Graph, orientations);\n\n // for(const Values::ConstKeyValuePair& key_value: orientations) {\n // Key key = key_value.key;\n // if (key != keyAnchor) {\n // const Point3& pos = givenGuess.at(key).translation();\n // const Rot3& rot = 
orientations.at(key);\n // Pose3 initializedPoses = Pose3(rot, pos);\n // initialValues.insert(key, initializedPoses);\n // }\n // }\n // return initialValues;\n}\n\n} // end of namespace lago\n} // end of namespace gtsam\n", "meta": {"hexsha": "58408e7e32d2915dcd43ed900b96d96e696f94c4", "size": 15436, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "gtsam/slam/InitializePose3.cpp", "max_stars_repo_name": "colinxs/gtsam", "max_stars_repo_head_hexsha": "c6d9baf3ce4b5ced7fec4c52e304a31b8eadffd0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2019-04-10T03:25:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-01T12:10:54.000Z", "max_issues_repo_path": "gtsam/slam/InitializePose3.cpp", "max_issues_repo_name": "colinxs/gtsam", "max_issues_repo_head_hexsha": "c6d9baf3ce4b5ced7fec4c52e304a31b8eadffd0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2017-12-14T06:41:31.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-14T06:41:31.000Z", "max_forks_repo_path": "gtsam/slam/InitializePose3.cpp", "max_forks_repo_name": "colinxs/gtsam", "max_forks_repo_head_hexsha": "c6d9baf3ce4b5ced7fec4c52e304a31b8eadffd0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2020-08-16T17:47:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-19T10:36:57.000Z", "avg_line_length": 37.9262899263, "max_line_length": 172, "alphanum_fraction": 0.616934439, "num_tokens": 4111, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7371581741774411, "lm_q2_score": 0.6757646075489391, "lm_q1q2_score": 0.498145404274511}} {"text": "/* ----------------------------------------------------------------------------\n\n * GTSAM Copyright 2010, Georgia Tech Research Corporation, \n * Atlanta, Georgia 30332-0415\n * All Rights Reserved\n * Authors: Frank Dellaert, et al. (see THANKS for the full author list)\n\n * See LICENSE for the license information\n\n * -------------------------------------------------------------------------- */\n\n/**\n * @file Pose3.cpp\n * @brief 3D Pose\n */\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace std;\n\nnamespace gtsam {\n\n/** Explicit instantiation of base class to export members */\nINSTANTIATE_LIE(Pose3);\n\n/** instantiate concept checks */\nGTSAM_CONCEPT_POSE_INST(Pose3);\n\nstatic const Matrix3 I3 = eye(3), Z3 = zeros(3, 3), _I3 = -I3;\nstatic const Matrix6 I6 = eye(6);\n\n/* ************************************************************************* */\nPose3::Pose3(const Pose2& pose2) :\n R_(Rot3::rodriguez(0, 0, pose2.theta())), t_(\n Point3(pose2.x(), pose2.y(), 0)) {\n}\n\n/* ************************************************************************* */\n// Calculate Adjoint map\n// Ad_pose is 6*6 matrix that when applied to twist xi, returns Ad_pose(xi)\n// Experimental - unit tests of derivatives based on it do not check out yet\nMatrix6 Pose3::AdjointMap() const {\n const Matrix3 R = R_.matrix();\n const Vector3 t = t_.vector();\n Matrix3 A = skewSymmetric(t) * R;\n Matrix6 adj;\n adj << R, Z3, A, R;\n return adj;\n}\n\n/* ************************************************************************* */\nMatrix6 Pose3::adjointMap(const Vector& xi) {\n Matrix3 w_hat = skewSymmetric(xi(0), xi(1), xi(2));\n Matrix3 v_hat = skewSymmetric(xi(3), xi(4), xi(5));\n Matrix6 adj;\n adj << w_hat, Z3, v_hat, w_hat;\n\n return adj;\n}\n\n/* 
************************************************************************* */\nVector Pose3::adjoint(const Vector& xi, const Vector& y,\n boost::optional H) {\n if (H) {\n *H = zeros(6, 6);\n for (int i = 0; i < 6; ++i) {\n Vector dxi = zero(6);\n dxi(i) = 1.0;\n Matrix Gi = adjointMap(dxi);\n (*H).col(i) = Gi * y;\n }\n }\n return adjointMap(xi) * y;\n}\n\n/* ************************************************************************* */\nVector Pose3::adjointTranspose(const Vector& xi, const Vector& y,\n boost::optional H) {\n if (H) {\n *H = zeros(6, 6);\n for (int i = 0; i < 6; ++i) {\n Vector dxi = zero(6);\n dxi(i) = 1.0;\n Matrix GTi = adjointMap(dxi).transpose();\n (*H).col(i) = GTi * y;\n }\n }\n Matrix adjT = adjointMap(xi).transpose();\n return adjointMap(xi).transpose() * y;\n}\n\n/* ************************************************************************* */\nMatrix6 Pose3::dExpInv_exp(const Vector& xi) {\n // Bernoulli numbers, from Wikipedia\n static const Vector B = (Vector(9) << 1.0, -1.0 / 2.0, 1. / 6., 0.0, -1.0 / 30.0,\n 0.0, 1.0 / 42.0, 0.0, -1.0 / 30);\n static const int N = 5; // order of approximation\n Matrix res = I6;\n Matrix6 ad_i = I6;\n Matrix6 ad_xi = adjointMap(xi);\n double fac = 1.0;\n for (int i = 1; i < N; ++i) {\n ad_i = ad_xi * ad_i;\n fac = fac * i;\n res = res + B(i) / fac * ad_i;\n }\n return res;\n}\n\n/* ************************************************************************* */\nvoid Pose3::print(const string& s) const {\n cout << s;\n R_.print(\"R:\\n\");\n t_.print(\"t: \");\n}\n\n/* ************************************************************************* */\nbool Pose3::equals(const Pose3& pose, double tol) const {\n return R_.equals(pose.R_, tol) && t_.equals(pose.t_, tol);\n}\n\n/* ************************************************************************* */\n/** Modified from Murray94book version (which assumes w and v normalized?) 
*/\nPose3 Pose3::Expmap(const Vector& xi) {\n\n // get angular velocity omega and translational velocity v from twist xi\n Point3 w(xi(0), xi(1), xi(2)), v(xi(3), xi(4), xi(5));\n\n double theta = w.norm();\n if (theta < 1e-10) {\n static const Rot3 I;\n return Pose3(I, v);\n } else {\n Point3 n(w / theta); // axis unit vector\n Rot3 R = Rot3::rodriguez(n.vector(), theta);\n double vn = n.dot(v); // translation parallel to n\n Point3 n_cross_v = n.cross(v); // points towards axis\n Point3 t = (n_cross_v - R * n_cross_v) / theta + vn * n;\n return Pose3(R, t);\n }\n}\n\n/* ************************************************************************* */\nVector6 Pose3::Logmap(const Pose3& p) {\n Vector3 w = Rot3::Logmap(p.rotation()), T = p.translation().vector();\n double t = w.norm();\n if (t < 1e-10) {\n Vector6 log;\n log << w, T;\n return log;\n } else {\n Matrix3 W = skewSymmetric(w / t);\n // Formula from Agrawal06iros, equation (14)\n // simplified with Mathematica, and multiplying in T to avoid matrix math\n double Tan = tan(0.5 * t);\n Vector3 WT = W * T;\n Vector3 u = T - (0.5 * t) * WT + (1 - t / (2. 
* Tan)) * (W * WT);\n Vector6 log;\n log << w, u;\n return log;\n }\n}\n\n/* ************************************************************************* */\nPose3 Pose3::retractFirstOrder(const Vector& xi) const {\n Vector3 omega(sub(xi, 0, 3));\n Point3 v(sub(xi, 3, 6));\n Rot3 R = R_.retract(omega); // R is done exactly\n Point3 t = t_ + R_ * v; // First order t approximation\n return Pose3(R, t);\n}\n\n/* ************************************************************************* */\n// Different versions of retract\nPose3 Pose3::retract(const Vector& xi, Pose3::CoordinatesMode mode) const {\n if (mode == Pose3::EXPMAP) {\n // Lie group exponential map, traces out geodesic\n return compose(Expmap(xi));\n } else if (mode == Pose3::FIRST_ORDER) {\n // First order\n return retractFirstOrder(xi);\n } else {\n // Point3 t = t_.retract(v.vector()); // Incorrect version retracts t independently\n // Point3 t = t_ + R_ * (v+Point3(omega).cross(v)/2); // Second order t approximation\n assert(false);\n exit(1);\n }\n}\n\n/* ************************************************************************* */\n// different versions of localCoordinates\nVector6 Pose3::localCoordinates(const Pose3& T,\n Pose3::CoordinatesMode mode) const {\n if (mode == Pose3::EXPMAP) {\n // Lie group logarithm map, exact inverse of exponential map\n return Logmap(between(T));\n } else if (mode == Pose3::FIRST_ORDER) {\n // R is always done exactly in all three retract versions below\n Vector3 omega = R_.localCoordinates(T.rotation());\n\n // Incorrect version\n // Independently computes the logmap of the translation and rotation\n // Vector v = t_.localCoordinates(T.translation());\n\n // Correct first order t inverse\n Point3 d = R_.unrotate(T.translation() - t_);\n\n // TODO: correct second order t inverse\n Vector6 local;\n local << omega(0), omega(1), omega(2), d.x(), d.y(), d.z();\n return local;\n } else {\n assert(false);\n exit(1);\n }\n}\n\n/* 
************************************************************************* */\nMatrix4 Pose3::matrix() const {\n const Matrix3 R = R_.matrix();\n const Vector3 T = t_.vector();\n Eigen::Matrix A14;\n A14 << 0.0, 0.0, 0.0, 1.0;\n Matrix4 mat;\n mat << R, T, A14;\n return mat;\n}\n\n/* ************************************************************************* */\nPose3 Pose3::transform_to(const Pose3& pose) const {\n Rot3 cRv = R_ * Rot3(pose.R_.inverse());\n Point3 t = pose.transform_to(t_);\n return Pose3(cRv, t);\n}\n\n/* ************************************************************************* */\nPoint3 Pose3::transform_from(const Point3& p, boost::optional Dpose,\n boost::optional Dpoint) const {\n if (Dpose) {\n const Matrix R = R_.matrix();\n Matrix DR = R * skewSymmetric(-p.x(), -p.y(), -p.z());\n Dpose->resize(3, 6);\n (*Dpose) << DR, R;\n }\n if (Dpoint)\n *Dpoint = R_.matrix();\n return R_ * p + t_;\n}\n\n/* ************************************************************************* */\nPoint3 Pose3::transform_to(const Point3& p, boost::optional Dpose,\n boost::optional Dpoint) const {\n const Point3 result = R_.unrotate(p - t_);\n if (Dpose) {\n const Point3& q = result;\n Matrix DR = skewSymmetric(q.x(), q.y(), q.z());\n Dpose->resize(3, 6);\n (*Dpose) << DR, _I3;\n }\n if (Dpoint)\n *Dpoint = R_.transpose();\n return result;\n}\n\n/* ************************************************************************* */\nPose3 Pose3::compose(const Pose3& p2, boost::optional H1,\n boost::optional H2) const {\n if (H1)\n *H1 = p2.inverse().AdjointMap();\n if (H2)\n *H2 = I6;\n return (*this) * p2;\n}\n\n/* ************************************************************************* */\nPose3 Pose3::inverse(boost::optional H1) const {\n if (H1)\n *H1 = -AdjointMap();\n Rot3 Rt = R_.inverse();\n return Pose3(Rt, Rt * (-t_));\n}\n\n/* ************************************************************************* */\n// between = compose(p2,inverse(p1));\nPose3 
Pose3::between(const Pose3& p2, boost::optional H1,\n boost::optional H2) const {\n Pose3 result = inverse() * p2;\n if (H1)\n *H1 = -result.inverse().AdjointMap();\n if (H2)\n *H2 = I6;\n return result;\n}\n\n/* ************************************************************************* */\ndouble Pose3::range(const Point3& point, boost::optional H1,\n boost::optional H2) const {\n if (!H1 && !H2)\n return transform_to(point).norm();\n Point3 d = transform_to(point, H1, H2);\n double x = d.x(), y = d.y(), z = d.z(), d2 = x * x + y * y + z * z, n = sqrt(\n d2);\n Matrix D_result_d = (Matrix(1, 3) << x / n, y / n, z / n);\n if (H1)\n *H1 = D_result_d * (*H1);\n if (H2)\n *H2 = D_result_d * (*H2);\n return n;\n}\n\n/* ************************************************************************* */\ndouble Pose3::range(const Pose3& point, boost::optional H1,\n boost::optional H2) const {\n double r = range(point.translation(), H1, H2);\n if (H2) {\n Matrix H2_ = *H2 * point.rotation().matrix();\n *H2 = zeros(1, 6);\n insertSub(*H2, H2_, 0, 3);\n }\n return r;\n}\n\n/* ************************************************************************* */\nboost::optional align(const vector& pairs) {\n const size_t n = pairs.size();\n if (n < 3)\n return boost::none; // we need at least three pairs\n\n // calculate centroids\n Vector cp = zero(3), cq = zero(3);\n BOOST_FOREACH(const Point3Pair& pair, pairs){\n cp += pair.first.vector();\n cq += pair.second.vector();\n}\n double f = 1.0 / n;\n cp *= f;\n cq *= f;\n\n // Add to form H matrix\n Matrix H = zeros(3, 3);\n BOOST_FOREACH(const Point3Pair& pair, pairs){\n Vector dp = pair.first.vector() - cp;\n Vector dq = pair.second.vector() - cq;\n H += dp * dq.transpose();\n}\n\n// Compute SVD\n Matrix U, V;\n Vector S;\n svd(H, U, S, V);\n\n // Recover transform with correction from Eggert97machinevisionandapplications\n Matrix UVtranspose = U * V.transpose();\n Matrix detWeighting = eye(3, 3);\n detWeighting(2, 2) = 
UVtranspose.determinant();\n Rot3 R(Matrix(V * detWeighting * U.transpose()));\n Point3 t = Point3(cq) - R * Point3(cp);\n return Pose3(R, t);\n}\n\n/* ************************************************************************* */\nstd::ostream &operator<<(std::ostream &os, const Pose3& pose) {\n os << pose.rotation() << \"\\n\" << pose.translation() << endl;\n return os;\n}\n\n} // namespace gtsam\n", "meta": {"hexsha": "bfd2fcb9aed98a9b673d8911a898399d8502cb4f", "size": 11512, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "gtsam/geometry/Pose3.cpp", "max_stars_repo_name": "ashariati/gtsam-3.2.1", "max_stars_repo_head_hexsha": "f880365c259eb7532b9c1d20979ecad2eb04779c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2017-05-10T08:07:42.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-26T19:11:18.000Z", "max_issues_repo_path": "gtsam/geometry/Pose3.cpp", "max_issues_repo_name": "ashariati/gtsam-3.2.1", "max_issues_repo_head_hexsha": "f880365c259eb7532b9c1d20979ecad2eb04779c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gtsam/geometry/Pose3.cpp", "max_forks_repo_name": "ashariati/gtsam-3.2.1", "max_forks_repo_head_hexsha": "f880365c259eb7532b9c1d20979ecad2eb04779c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2017-02-17T18:55:24.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-25T00:28:40.000Z", "avg_line_length": 30.7807486631, "max_line_length": 89, "alphanum_fraction": 0.5044301598, "num_tokens": 3215, "lm_name": "Qwen/Qwen-72B", "lm_label": "1. YES\n2. 
YES", "lm_q1_score": 0.7371581510799252, "lm_q2_score": 0.6757645944891558, "lm_q1q2_score": 0.49814537903890155}} {"text": "/*cppimport\n<%\ncfg['include_dirs'] = ['../..','../extern']\ncfg['compiler_args'] = ['-std=c++17', '-w']\ncfg['dependencies'] = ['xbin.hpp', '../util/assertions.hpp',\n'../util/global_rng.hpp']\n\ncfg['parallel'] = False\nsetup_pybind11(cfg)\n%>\n*/\n\n#include \n\n#include \n#include \n\n#include \"rpxdock/util/Timer.hpp\"\n#include \"rpxdock/util/assertions.hpp\"\n#include \"rpxdock/util/global_rng.hpp\"\n#include \"rpxdock/util/types.hpp\"\n#include \"rpxdock/xbin/xbin.hpp\"\n\n#include \nnamespace py = pybind11;\n\nnamespace rpxdock {\nusing namespace util;\n\nnamespace xbin {\nnamespace test {\n\ntemplate \nvoid rand_xform(std::mt19937& rng, Eigen::Transform& x,\n float max_cart = 512.0f) {\n std::uniform_real_distribution runif;\n std::normal_distribution rnorm;\n Eigen::Quaternion qrand(rnorm(rng), rnorm(rng), rnorm(rng), rnorm(rng));\n qrand.normalize();\n x.linear() = qrand.matrix();\n x.translation() = V3(runif(rng) * max_cart - max_cart / 2.0,\n runif(rng) * max_cart - max_cart / 2.0,\n runif(rng) * max_cart - max_cart / 2.0);\n}\n\ntemplate \nX3 rand_xform(F max_cart = 512.0) {\n X3 x;\n rand_xform(global_rng(), x, max_cart);\n return x;\n}\n\nusing std::cout;\nusing std::endl;\n\ntypedef Eigen::Transform Xform;\n// typedef Eigen::Affine3d Xform;\n\ntemplate