Datasets:

Modalities:
Text
Formats:
parquet
Size:
< 1K
ArXiv:
Libraries:
Datasets
pandas
func_name
stringlengths
2
53
func_src_before
stringlengths
63
114k
func_src_after
stringlengths
86
114k
line_changes
dict
char_changes
dict
commit_link
stringlengths
66
117
file_name
stringlengths
5
72
vul_type
stringclasses
9 values
dd_get_item_size
long dd_get_item_size(struct dump_dir *dd, const char *name) { long size = -1; char *iname = concat_path_file(dd->dd_dirname, name); struct stat statbuf; if (lstat(iname, &statbuf) == 0 && S_ISREG(statbuf.st_mode)) size = statbuf.st_size; else { if (errno == ENOENT) size = 0; else perror_msg("Can't get size of file '%s'", iname); } free(iname); return size; }
long dd_get_item_size(struct dump_dir *dd, const char *name) { if (!str_is_correct_filename(name)) error_msg_and_die("Cannot get item size. '%s' is not a valid file name", name); long size = -1; char *iname = concat_path_file(dd->dd_dirname, name); struct stat statbuf; if (lstat(iname, &statbuf) == 0 && S_ISREG(statbuf.st_mode)) size = statbuf.st_size; else { if (errno == ENOENT) size = 0; else perror_msg("Can't get size of file '%s'", iname); } free(iname); return size; }
{ "deleted": [], "added": [ { "line_no": 3, "char_start": 63, "char_end": 103, "line": " if (!str_is_correct_filename(name))\n" }, { "line_no": 4, "char_start": 103, "char_end": 191, "line": " error_msg_and_die(\"Cannot get item size. '%s' is not a valid file name\", name);\n" }, { "line_no": 5, "char_start": 191, "char_end": 192, "line": "\n" } ] }
{ "deleted": [], "added": [ { "char_start": 67, "char_end": 196, "chars": "if (!str_is_correct_filename(name))\n error_msg_and_die(\"Cannot get item size. '%s' is not a valid file name\", name);\n\n " } ] }
github.com/abrt/libreport/commit/239c4f7d1f47265526b39ad70106767d00805277
src/lib/dump_dir.c
cwe-022
zmi_page_request
def zmi_page_request(self, *args, **kwargs): request = self.REQUEST RESPONSE = request.RESPONSE SESSION = request.SESSION self._zmi_page_request() RESPONSE.setHeader('Expires',DateTime(request['ZMI_TIME']-10000).toZone('GMT+1').rfc822()) RESPONSE.setHeader('Cache-Control', 'no-cache') RESPONSE.setHeader('Pragma', 'no-cache') RESPONSE.setHeader('Content-Type', 'text/html;charset=%s'%request['ZMS_CHARSET']) if not request.get( 'preview'): request.set( 'preview','preview') langs = self.getLanguages(request) if request.get('lang') not in langs: request.set('lang',langs[0]) if request.get('manage_lang') not in self.getLocale().get_manage_langs(): request.set('manage_lang',self.get_manage_lang()) if not request.get('manage_tabs_message'): request.set( 'manage_tabs_message',self.getConfProperty('ZMS.manage_tabs_message','')) # manage_system if request.form.has_key('zmi-manage-system'): request.SESSION.set('zmi-manage-system',int(request.get('zmi-manage-system'))) # avoid declarative urls physical_path = self.getPhysicalPath() path_to_handle = request['URL0'][len(request['BASE0']):].split('/') path = path_to_handle[:-1] if len(filter(lambda x:x.find('.')>0 or x.startswith('manage_'),path))==0: for i in range(len(path)): if path[:-(i+1)] != physical_path[:-(i+1)]: path[:-(i+1)] = physical_path[:-(i+1)] new_path = path+[path_to_handle[-1]] if path_to_handle != new_path: request.RESPONSE.redirect('/'.join(new_path))
def zmi_page_request(self, *args, **kwargs): request = self.REQUEST RESPONSE = request.RESPONSE SESSION = request.SESSION self._zmi_page_request() RESPONSE.setHeader('Expires',DateTime(request['ZMI_TIME']-10000).toZone('GMT+1').rfc822()) RESPONSE.setHeader('Cache-Control', 'no-cache') RESPONSE.setHeader('Pragma', 'no-cache') RESPONSE.setHeader('Content-Type', 'text/html;charset=%s'%request['ZMS_CHARSET']) if not request.get( 'preview'): request.set( 'preview','preview') langs = self.getLanguages(request) if request.get('lang') not in langs: request.set('lang',langs[0]) if request.get('manage_lang') not in self.getLocale().get_manage_langs(): request.set('manage_lang',self.get_manage_lang()) if not request.get('manage_tabs_message'): request.set( 'manage_tabs_message',self.getConfProperty('ZMS.manage_tabs_message','')) # manage_system if request.form.has_key('zmi-manage-system'): request.SESSION.set('zmi-manage-system',int(request.get('zmi-manage-system'))) # avoid declarative urls physical_path = self.getPhysicalPath() path_to_handle = request['URL0'][len(request['BASE0']):].split('/') path = path_to_handle[:-1] if self.getDocumentElement().id in path and len(filter(lambda x:x.find('.')>0 or x.startswith('manage_'),path))==0: for i in range(len(path)): if path[:-(i+1)] != physical_path[:-(i+1)]: path[:-(i+1)] = physical_path[:-(i+1)] new_path = path+[path_to_handle[-1]] if path_to_handle != new_path: request.RESPONSE.redirect('/'.join(new_path))
{ "deleted": [ { "line_no": 26, "char_start": 1313, "char_end": 1395, "line": " if len(filter(lambda x:x.find('.')>0 or x.startswith('manage_'),path))==0:\r\n" } ], "added": [ { "line_no": 26, "char_start": 1313, "char_end": 1436, "line": " if self.getDocumentElement().id in path and len(filter(lambda x:x.find('.')>0 or x.startswith('manage_'),path))==0:\r\n" } ] }
{ "deleted": [], "added": [ { "char_start": 1322, "char_end": 1363, "chars": "self.getDocumentElement().id in path and " } ] }
github.com/zms-publishing/zms4/commit/3f28620d475220dfdb06f79787158ac50727c61a
ZMSItem.py
cwe-022
nntp_hcache_namer
static int nntp_hcache_namer(const char *path, char *dest, size_t destlen) { return snprintf(dest, destlen, "%s.hcache", path); }
static int nntp_hcache_namer(const char *path, char *dest, size_t destlen) { int count = snprintf(dest, destlen, "%s.hcache", path); /* Strip out any directories in the path */ char *first = strchr(dest, '/'); char *last = strrchr(dest, '/'); if (first && last && (last > first)) { memmove(first, last, strlen(last) + 1); count -= (last - first); } return count; }
{ "deleted": [ { "line_no": 3, "char_start": 77, "char_end": 130, "line": " return snprintf(dest, destlen, \"%s.hcache\", path);\n" } ], "added": [ { "line_no": 3, "char_start": 77, "char_end": 135, "line": " int count = snprintf(dest, destlen, \"%s.hcache\", path);\n" }, { "line_no": 4, "char_start": 135, "char_end": 136, "line": "\n" }, { "line_no": 5, "char_start": 136, "char_end": 182, "line": " /* Strip out any directories in the path */\n" }, { "line_no": 6, "char_start": 182, "char_end": 217, "line": " char *first = strchr(dest, '/');\n" }, { "line_no": 7, "char_start": 217, "char_end": 252, "line": " char *last = strrchr(dest, '/');\n" }, { "line_no": 8, "char_start": 252, "char_end": 291, "line": " if (first && last && (last > first))\n" }, { "line_no": 9, "char_start": 291, "char_end": 295, "line": " {\n" }, { "line_no": 10, "char_start": 295, "char_end": 339, "line": " memmove(first, last, strlen(last) + 1);\n" }, { "line_no": 11, "char_start": 339, "char_end": 368, "line": " count -= (last - first);\n" }, { "line_no": 12, "char_start": 368, "char_end": 372, "line": " }\n" }, { "line_no": 13, "char_start": 372, "char_end": 373, "line": "\n" }, { "line_no": 14, "char_start": 373, "char_end": 389, "line": " return count;\n" } ] }
{ "deleted": [ { "char_start": 79, "char_end": 81, "chars": "re" }, { "char_start": 83, "char_end": 84, "chars": "r" } ], "added": [ { "char_start": 79, "char_end": 81, "chars": "in" }, { "char_start": 82, "char_end": 85, "chars": " co" }, { "char_start": 87, "char_end": 90, "chars": "t =" }, { "char_start": 133, "char_end": 387, "chars": ";\n\n /* Strip out any directories in the path */\n char *first = strchr(dest, '/');\n char *last = strrchr(dest, '/');\n if (first && last && (last > first))\n {\n memmove(first, last, strlen(last) + 1);\n count -= (last - first);\n }\n\n return count" } ] }
github.com/neomutt/neomutt/commit/9bfab35522301794483f8f9ed60820bdec9be59e
newsrc.c
cwe-022
TarFileReader::extract
std::string TarFileReader::extract(const string &_path) { if (_path.empty()) THROW("path cannot be empty"); if (!hasMore()) THROW("No more tar files"); string path = _path; if (SystemUtilities::isDirectory(path)) path += "/" + getFilename(); LOG_DEBUG(5, "Extracting: " << path); return extract(*SystemUtilities::oopen(path)); }
std::string TarFileReader::extract(const string &_path) { if (_path.empty()) THROW("path cannot be empty"); if (!hasMore()) THROW("No more tar files"); string path = _path; if (SystemUtilities::isDirectory(path)) { path += "/" + getFilename(); // Check that path is under the target directory string a = SystemUtilities::getCanonicalPath(_path); string b = SystemUtilities::getCanonicalPath(path); if (!String::startsWith(b, a)) THROW("Tar path points outside of the extraction directory: " << path); } LOG_DEBUG(5, "Extracting: " << path); switch (getType()) { case NORMAL_FILE: case CONTIGUOUS_FILE: return extract(*SystemUtilities::oopen(path)); case DIRECTORY: SystemUtilities::ensureDirectory(path); break; default: THROW("Unsupported tar file type " << getType()); } return getFilename(); }
{ "deleted": [ { "line_no": 6, "char_start": 180, "char_end": 251, "line": " if (SystemUtilities::isDirectory(path)) path += \"/\" + getFilename();\n" }, { "line_no": 10, "char_start": 293, "char_end": 342, "line": " return extract(*SystemUtilities::oopen(path));\n" } ], "added": [ { "line_no": 6, "char_start": 180, "char_end": 224, "line": " if (SystemUtilities::isDirectory(path)) {\n" }, { "line_no": 7, "char_start": 224, "char_end": 257, "line": " path += \"/\" + getFilename();\n" }, { "line_no": 8, "char_start": 257, "char_end": 258, "line": "\n" }, { "line_no": 10, "char_start": 311, "char_end": 368, "line": " string a = SystemUtilities::getCanonicalPath(_path);\n" }, { "line_no": 11, "char_start": 368, "char_end": 424, "line": " string b = SystemUtilities::getCanonicalPath(path);\n" }, { "line_no": 12, "char_start": 424, "char_end": 459, "line": " if (!String::startsWith(b, a))\n" }, { "line_no": 13, "char_start": 459, "char_end": 537, "line": " THROW(\"Tar path points outside of the extraction directory: \" << path);\n" }, { "line_no": 14, "char_start": 537, "char_end": 541, "line": " }\n" }, { "line_no": 18, "char_start": 583, "char_end": 606, "line": " switch (getType()) {\n" }, { "line_no": 19, "char_start": 606, "char_end": 648, "line": " case NORMAL_FILE: case CONTIGUOUS_FILE:\n" }, { "line_no": 20, "char_start": 648, "char_end": 699, "line": " return extract(*SystemUtilities::oopen(path));\n" }, { "line_no": 21, "char_start": 699, "char_end": 764, "line": " case DIRECTORY: SystemUtilities::ensureDirectory(path); break;\n" }, { "line_no": 22, "char_start": 764, "char_end": 825, "line": " default: THROW(\"Unsupported tar file type \" << getType());\n" }, { "line_no": 23, "char_start": 825, "char_end": 829, "line": " }\n" }, { "line_no": 24, "char_start": 829, "char_end": 830, "line": "\n" }, { "line_no": 25, "char_start": 830, "char_end": 854, "line": " return getFilename();\n" } ] }
{ "deleted": [], "added": [ { "char_start": 222, "char_end": 228, "chars": "{\n " }, { "char_start": 260, "char_end": 544, "chars": " // Check that path is under the target directory\n string a = SystemUtilities::getCanonicalPath(_path);\n string b = SystemUtilities::getCanonicalPath(path);\n if (!String::startsWith(b, a))\n THROW(\"Tar path points outside of the extraction directory: \" << path);\n }\n\n " }, { "char_start": 585, "char_end": 652, "chars": "switch (getType()) {\n case NORMAL_FILE: case CONTIGUOUS_FILE:\n " }, { "char_start": 696, "char_end": 851, "chars": ");\n case DIRECTORY: SystemUtilities::ensureDirectory(path); break;\n default: THROW(\"Unsupported tar file type \" << getType());\n }\n\n return getFilename(" } ] }
github.com/CauldronDevelopmentLLC/cbang/commit/1c1dba62bd3e6fa9d0d0c0aa21926043b75382c7
src/cbang/tar/TarFileReader.cpp
cwe-022
handle_method_call
static void handle_method_call(GDBusConnection *connection, const gchar *caller, const gchar *object_path, const gchar *interface_name, const gchar *method_name, GVariant *parameters, GDBusMethodInvocation *invocation, gpointer user_data) { reset_timeout(); uid_t caller_uid; GVariant *response; caller_uid = get_caller_uid(connection, invocation, caller); log_notice("caller_uid:%ld method:'%s'", (long)caller_uid, method_name); if (caller_uid == (uid_t) -1) return; if (g_strcmp0(method_name, "NewProblem") == 0) { char *error = NULL; char *problem_id = handle_new_problem(g_variant_get_child_value(parameters, 0), caller_uid, &error); if (!problem_id) { g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.Failure", error); free(error); return; } /* else */ response = g_variant_new("(s)", problem_id); g_dbus_method_invocation_return_value(invocation, response); free(problem_id); return; } if (g_strcmp0(method_name, "GetProblems") == 0) { GList *dirs = get_problem_dirs_for_uid(caller_uid, g_settings_dump_location); response = variant_from_string_list(dirs); list_free_with_free(dirs); g_dbus_method_invocation_return_value(invocation, response); //I was told that g_dbus_method frees the response //g_variant_unref(response); return; } if (g_strcmp0(method_name, "GetAllProblems") == 0) { /* - so, we have UID, - if it's 0, then we don't have to check anything and just return all directories - if uid != 0 then we want to ask for authorization */ if (caller_uid != 0) { if (polkit_check_authorization_dname(caller, "org.freedesktop.problems.getall") == PolkitYes) caller_uid = 0; } GList * dirs = get_problem_dirs_for_uid(caller_uid, g_settings_dump_location); response = variant_from_string_list(dirs); list_free_with_free(dirs); g_dbus_method_invocation_return_value(invocation, response); return; } if (g_strcmp0(method_name, "GetForeignProblems") == 0) { GList * dirs = get_problem_dirs_not_accessible_by_uid(caller_uid, g_settings_dump_location); response 
= variant_from_string_list(dirs); list_free_with_free(dirs); g_dbus_method_invocation_return_value(invocation, response); return; } if (g_strcmp0(method_name, "ChownProblemDir") == 0) { const gchar *problem_dir; g_variant_get(parameters, "(&s)", &problem_dir); log_notice("problem_dir:'%s'", problem_dir); if (!allowed_problem_dir(problem_dir)) { return_InvalidProblemDir_error(invocation, problem_dir); return; } int dir_fd = dd_openfd(problem_dir); if (dir_fd < 0) { perror_msg("can't open problem directory '%s'", problem_dir); return_InvalidProblemDir_error(invocation, problem_dir); return; } int ddstat = fdump_dir_stat_for_uid(dir_fd, caller_uid); if (ddstat < 0) { if (errno == ENOTDIR) { log_notice("requested directory does not exist '%s'", problem_dir); } else { perror_msg("can't get stat of '%s'", problem_dir); } return_InvalidProblemDir_error(invocation, problem_dir); close(dir_fd); return; } if (ddstat & DD_STAT_OWNED_BY_UID) { //caller seems to be in group with access to this dir, so no action needed log_notice("caller has access to the requested directory %s", problem_dir); g_dbus_method_invocation_return_value(invocation, NULL); close(dir_fd); return; } if ((ddstat & DD_STAT_ACCESSIBLE_BY_UID) == 0 && polkit_check_authorization_dname(caller, "org.freedesktop.problems.getall") != PolkitYes) { log_notice("not authorized"); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.AuthFailure", _("Not Authorized")); close(dir_fd); return; } struct dump_dir *dd = dd_fdopendir(dir_fd, problem_dir, DD_OPEN_READONLY | DD_FAIL_QUIETLY_EACCES); if (!dd) { return_InvalidProblemDir_error(invocation, problem_dir); return; } int chown_res = dd_chown(dd, caller_uid); if (chown_res != 0) g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.ChownError", _("Chowning directory failed. 
Check system logs for more details.")); else g_dbus_method_invocation_return_value(invocation, NULL); dd_close(dd); return; } if (g_strcmp0(method_name, "GetInfo") == 0) { /* Parameter tuple is (sas) */ /* Get 1st param - problem dir name */ const gchar *problem_dir; g_variant_get_child(parameters, 0, "&s", &problem_dir); log_notice("problem_dir:'%s'", problem_dir); if (!allowed_problem_dir(problem_dir)) { return_InvalidProblemDir_error(invocation, problem_dir); return; } int dir_fd = dd_openfd(problem_dir); if (dir_fd < 0) { perror_msg("can't open problem directory '%s'", problem_dir); return_InvalidProblemDir_error(invocation, problem_dir); return; } if (!fdump_dir_accessible_by_uid(dir_fd, caller_uid)) { if (errno == ENOTDIR) { log_notice("Requested directory does not exist '%s'", problem_dir); return_InvalidProblemDir_error(invocation, problem_dir); close(dir_fd); return; } if (polkit_check_authorization_dname(caller, "org.freedesktop.problems.getall") != PolkitYes) { log_notice("not authorized"); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.AuthFailure", _("Not Authorized")); close(dir_fd); return; } } struct dump_dir *dd = dd_fdopendir(dir_fd, problem_dir, DD_OPEN_READONLY | DD_FAIL_QUIETLY_EACCES); if (!dd) { return_InvalidProblemDir_error(invocation, problem_dir); return; } /* Get 2nd param - vector of element names */ GVariant *array = g_variant_get_child_value(parameters, 1); GList *elements = string_list_from_variant(array); g_variant_unref(array); GVariantBuilder *builder = NULL; for (GList *l = elements; l; l = l->next) { const char *element_name = (const char*)l->data; char *value = dd_load_text_ext(dd, element_name, 0 | DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE | DD_FAIL_QUIETLY_ENOENT | DD_FAIL_QUIETLY_EACCES); log_notice("element '%s' %s", element_name, value ? "fetched" : "not found"); if (value) { if (!builder) builder = g_variant_builder_new(G_VARIANT_TYPE_ARRAY); /* g_variant_builder_add makes a copy. 
No need to xstrdup here */ g_variant_builder_add(builder, "{ss}", element_name, value); free(value); } } list_free_with_free(elements); dd_close(dd); /* It is OK to call g_variant_new("(a{ss})", NULL) because */ /* G_VARIANT_TYPE_TUPLE allows NULL value */ GVariant *response = g_variant_new("(a{ss})", builder); if (builder) g_variant_builder_unref(builder); log_info("GetInfo: returning value for '%s'", problem_dir); g_dbus_method_invocation_return_value(invocation, response); return; } if (g_strcmp0(method_name, "SetElement") == 0) { const char *problem_id; const char *element; const char *value; g_variant_get(parameters, "(&s&s&s)", &problem_id, &element, &value); if (!str_is_correct_filename(element)) { log_notice("'%s' is not a valid element name of '%s'", element, problem_id); char *error = xasprintf(_("'%s' is not a valid element name"), element); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.InvalidElement", error); free(error); return; } struct dump_dir *dd = open_directory_for_modification_of_element( invocation, caller_uid, problem_id, element); if (!dd) /* Already logged from open_directory_for_modification_of_element() */ return; /* Is it good idea to make it static? Is it possible to change the max size while a single run? 
*/ const double max_dir_size = g_settings_nMaxCrashReportsSize * (1024 * 1024); const long item_size = dd_get_item_size(dd, element); if (item_size < 0) { log_notice("Can't get size of '%s/%s'", problem_id, element); char *error = xasprintf(_("Can't get size of '%s'"), element); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.Failure", error); return; } const double requested_size = (double)strlen(value) - item_size; /* Don't want to check the size limit in case of reducing of size */ if (requested_size > 0 && requested_size > (max_dir_size - get_dirsize(g_settings_dump_location))) { log_notice("No problem space left in '%s' (requested Bytes %f)", problem_id, requested_size); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.Failure", _("No problem space left")); } else { dd_save_text(dd, element, value); g_dbus_method_invocation_return_value(invocation, NULL); } dd_close(dd); return; } if (g_strcmp0(method_name, "DeleteElement") == 0) { const char *problem_id; const char *element; g_variant_get(parameters, "(&s&s)", &problem_id, &element); if (!str_is_correct_filename(element)) { log_notice("'%s' is not a valid element name of '%s'", element, problem_id); char *error = xasprintf(_("'%s' is not a valid element name"), element); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.InvalidElement", error); free(error); return; } struct dump_dir *dd = open_directory_for_modification_of_element( invocation, caller_uid, problem_id, element); if (!dd) /* Already logged from open_directory_for_modification_of_element() */ return; const int res = dd_delete_item(dd, element); dd_close(dd); if (res != 0) { log_notice("Can't delete the element '%s' from the problem directory '%s'", element, problem_id); char *error = xasprintf(_("Can't delete the element '%s' from the problem directory '%s'"), element, problem_id); g_dbus_method_invocation_return_dbus_error(invocation, 
"org.freedesktop.problems.Failure", error); free(error); return; } g_dbus_method_invocation_return_value(invocation, NULL); return; } if (g_strcmp0(method_name, "DeleteProblem") == 0) { /* Dbus parameters are always tuples. * In this case, it's (as) - a tuple of one element (array of strings). * Need to fetch the array: */ GVariant *array = g_variant_get_child_value(parameters, 0); GList *problem_dirs = string_list_from_variant(array); g_variant_unref(array); for (GList *l = problem_dirs; l; l = l->next) { const char *dir_name = (const char*)l->data; log_notice("dir_name:'%s'", dir_name); if (!allowed_problem_dir(dir_name)) { return_InvalidProblemDir_error(invocation, dir_name); goto ret; } } for (GList *l = problem_dirs; l; l = l->next) { const char *dir_name = (const char*)l->data; int dir_fd = dd_openfd(dir_name); if (dir_fd < 0) { perror_msg("can't open problem directory '%s'", dir_name); return_InvalidProblemDir_error(invocation, dir_name); return; } if (!fdump_dir_accessible_by_uid(dir_fd, caller_uid)) { if (errno == ENOTDIR) { log_notice("Requested directory does not exist '%s'", dir_name); close(dir_fd); continue; } if (polkit_check_authorization_dname(caller, "org.freedesktop.problems.getall") != PolkitYes) { // if user didn't provide correct credentials, just move to the next dir close(dir_fd); continue; } } struct dump_dir *dd = dd_fdopendir(dir_fd, dir_name, /*flags:*/ 0); if (dd) { if (dd_delete(dd) != 0) { error_msg("Failed to delete problem directory '%s'", dir_name); dd_close(dd); } } } g_dbus_method_invocation_return_value(invocation, NULL); ret: list_free_with_free(problem_dirs); return; } if (g_strcmp0(method_name, "FindProblemByElementInTimeRange") == 0) { const gchar *element; const gchar *value; glong timestamp_from; glong timestamp_to; gboolean all; g_variant_get_child(parameters, 0, "&s", &element); g_variant_get_child(parameters, 1, "&s", &value); g_variant_get_child(parameters, 2, "x", &timestamp_from); g_variant_get_child(parameters, 3, 
"x", &timestamp_to); g_variant_get_child(parameters, 4, "b", &all); if (all && polkit_check_authorization_dname(caller, "org.freedesktop.problems.getall") == PolkitYes) caller_uid = 0; GList *dirs = get_problem_dirs_for_element_in_time(caller_uid, element, value, timestamp_from, timestamp_to); response = variant_from_string_list(dirs); list_free_with_free(dirs); g_dbus_method_invocation_return_value(invocation, response); return; } if (g_strcmp0(method_name, "Quit") == 0) { g_dbus_method_invocation_return_value(invocation, NULL); g_main_loop_quit(loop); return; } }
static void handle_method_call(GDBusConnection *connection, const gchar *caller, const gchar *object_path, const gchar *interface_name, const gchar *method_name, GVariant *parameters, GDBusMethodInvocation *invocation, gpointer user_data) { reset_timeout(); uid_t caller_uid; GVariant *response; caller_uid = get_caller_uid(connection, invocation, caller); log_notice("caller_uid:%ld method:'%s'", (long)caller_uid, method_name); if (caller_uid == (uid_t) -1) return; if (g_strcmp0(method_name, "NewProblem") == 0) { char *error = NULL; char *problem_id = handle_new_problem(g_variant_get_child_value(parameters, 0), caller_uid, &error); if (!problem_id) { g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.Failure", error); free(error); return; } /* else */ response = g_variant_new("(s)", problem_id); g_dbus_method_invocation_return_value(invocation, response); free(problem_id); return; } if (g_strcmp0(method_name, "GetProblems") == 0) { GList *dirs = get_problem_dirs_for_uid(caller_uid, g_settings_dump_location); response = variant_from_string_list(dirs); list_free_with_free(dirs); g_dbus_method_invocation_return_value(invocation, response); //I was told that g_dbus_method frees the response //g_variant_unref(response); return; } if (g_strcmp0(method_name, "GetAllProblems") == 0) { /* - so, we have UID, - if it's 0, then we don't have to check anything and just return all directories - if uid != 0 then we want to ask for authorization */ if (caller_uid != 0) { if (polkit_check_authorization_dname(caller, "org.freedesktop.problems.getall") == PolkitYes) caller_uid = 0; } GList * dirs = get_problem_dirs_for_uid(caller_uid, g_settings_dump_location); response = variant_from_string_list(dirs); list_free_with_free(dirs); g_dbus_method_invocation_return_value(invocation, response); return; } if (g_strcmp0(method_name, "GetForeignProblems") == 0) { GList * dirs = get_problem_dirs_not_accessible_by_uid(caller_uid, g_settings_dump_location); response 
= variant_from_string_list(dirs); list_free_with_free(dirs); g_dbus_method_invocation_return_value(invocation, response); return; } if (g_strcmp0(method_name, "ChownProblemDir") == 0) { const gchar *problem_dir; g_variant_get(parameters, "(&s)", &problem_dir); log_notice("problem_dir:'%s'", problem_dir); if (!allowed_problem_dir(problem_dir)) { return_InvalidProblemDir_error(invocation, problem_dir); return; } int dir_fd = dd_openfd(problem_dir); if (dir_fd < 0) { perror_msg("can't open problem directory '%s'", problem_dir); return_InvalidProblemDir_error(invocation, problem_dir); return; } int ddstat = fdump_dir_stat_for_uid(dir_fd, caller_uid); if (ddstat < 0) { if (errno == ENOTDIR) { log_notice("requested directory does not exist '%s'", problem_dir); } else { perror_msg("can't get stat of '%s'", problem_dir); } return_InvalidProblemDir_error(invocation, problem_dir); close(dir_fd); return; } if (ddstat & DD_STAT_OWNED_BY_UID) { //caller seems to be in group with access to this dir, so no action needed log_notice("caller has access to the requested directory %s", problem_dir); g_dbus_method_invocation_return_value(invocation, NULL); close(dir_fd); return; } if ((ddstat & DD_STAT_ACCESSIBLE_BY_UID) == 0 && polkit_check_authorization_dname(caller, "org.freedesktop.problems.getall") != PolkitYes) { log_notice("not authorized"); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.AuthFailure", _("Not Authorized")); close(dir_fd); return; } struct dump_dir *dd = dd_fdopendir(dir_fd, problem_dir, DD_OPEN_READONLY | DD_FAIL_QUIETLY_EACCES); if (!dd) { return_InvalidProblemDir_error(invocation, problem_dir); return; } int chown_res = dd_chown(dd, caller_uid); if (chown_res != 0) g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.ChownError", _("Chowning directory failed. 
Check system logs for more details.")); else g_dbus_method_invocation_return_value(invocation, NULL); dd_close(dd); return; } if (g_strcmp0(method_name, "GetInfo") == 0) { /* Parameter tuple is (sas) */ /* Get 1st param - problem dir name */ const gchar *problem_dir; g_variant_get_child(parameters, 0, "&s", &problem_dir); log_notice("problem_dir:'%s'", problem_dir); if (!allowed_problem_dir(problem_dir)) { return_InvalidProblemDir_error(invocation, problem_dir); return; } int dir_fd = dd_openfd(problem_dir); if (dir_fd < 0) { perror_msg("can't open problem directory '%s'", problem_dir); return_InvalidProblemDir_error(invocation, problem_dir); return; } if (!fdump_dir_accessible_by_uid(dir_fd, caller_uid)) { if (errno == ENOTDIR) { log_notice("Requested directory does not exist '%s'", problem_dir); return_InvalidProblemDir_error(invocation, problem_dir); close(dir_fd); return; } if (polkit_check_authorization_dname(caller, "org.freedesktop.problems.getall") != PolkitYes) { log_notice("not authorized"); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.AuthFailure", _("Not Authorized")); close(dir_fd); return; } } struct dump_dir *dd = dd_fdopendir(dir_fd, problem_dir, DD_OPEN_READONLY | DD_FAIL_QUIETLY_EACCES); if (!dd) { return_InvalidProblemDir_error(invocation, problem_dir); return; } /* Get 2nd param - vector of element names */ GVariant *array = g_variant_get_child_value(parameters, 1); GList *elements = string_list_from_variant(array); g_variant_unref(array); GVariantBuilder *builder = NULL; for (GList *l = elements; l; l = l->next) { const char *element_name = (const char*)l->data; char *value = dd_load_text_ext(dd, element_name, 0 | DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE | DD_FAIL_QUIETLY_ENOENT | DD_FAIL_QUIETLY_EACCES); log_notice("element '%s' %s", element_name, value ? "fetched" : "not found"); if (value) { if (!builder) builder = g_variant_builder_new(G_VARIANT_TYPE_ARRAY); /* g_variant_builder_add makes a copy. 
No need to xstrdup here */ g_variant_builder_add(builder, "{ss}", element_name, value); free(value); } } list_free_with_free(elements); dd_close(dd); /* It is OK to call g_variant_new("(a{ss})", NULL) because */ /* G_VARIANT_TYPE_TUPLE allows NULL value */ GVariant *response = g_variant_new("(a{ss})", builder); if (builder) g_variant_builder_unref(builder); log_info("GetInfo: returning value for '%s'", problem_dir); g_dbus_method_invocation_return_value(invocation, response); return; } if (g_strcmp0(method_name, "SetElement") == 0) { const char *problem_id; const char *element; const char *value; g_variant_get(parameters, "(&s&s&s)", &problem_id, &element, &value); if (!allowed_problem_dir(problem_id)) { return_InvalidProblemDir_error(invocation, problem_id); return; } if (!str_is_correct_filename(element)) { log_notice("'%s' is not a valid element name of '%s'", element, problem_id); char *error = xasprintf(_("'%s' is not a valid element name"), element); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.InvalidElement", error); free(error); return; } struct dump_dir *dd = open_directory_for_modification_of_element( invocation, caller_uid, problem_id, element); if (!dd) /* Already logged from open_directory_for_modification_of_element() */ return; /* Is it good idea to make it static? Is it possible to change the max size while a single run? 
*/ const double max_dir_size = g_settings_nMaxCrashReportsSize * (1024 * 1024); const long item_size = dd_get_item_size(dd, element); if (item_size < 0) { log_notice("Can't get size of '%s/%s'", problem_id, element); char *error = xasprintf(_("Can't get size of '%s'"), element); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.Failure", error); return; } const double requested_size = (double)strlen(value) - item_size; /* Don't want to check the size limit in case of reducing of size */ if (requested_size > 0 && requested_size > (max_dir_size - get_dirsize(g_settings_dump_location))) { log_notice("No problem space left in '%s' (requested Bytes %f)", problem_id, requested_size); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.Failure", _("No problem space left")); } else { dd_save_text(dd, element, value); g_dbus_method_invocation_return_value(invocation, NULL); } dd_close(dd); return; } if (g_strcmp0(method_name, "DeleteElement") == 0) { const char *problem_id; const char *element; g_variant_get(parameters, "(&s&s)", &problem_id, &element); if (!allowed_problem_dir(problem_id)) { return_InvalidProblemDir_error(invocation, problem_id); return; } if (!str_is_correct_filename(element)) { log_notice("'%s' is not a valid element name of '%s'", element, problem_id); char *error = xasprintf(_("'%s' is not a valid element name"), element); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.InvalidElement", error); free(error); return; } struct dump_dir *dd = open_directory_for_modification_of_element( invocation, caller_uid, problem_id, element); if (!dd) /* Already logged from open_directory_for_modification_of_element() */ return; const int res = dd_delete_item(dd, element); dd_close(dd); if (res != 0) { log_notice("Can't delete the element '%s' from the problem directory '%s'", element, problem_id); char *error = xasprintf(_("Can't delete the element '%s' from the problem 
directory '%s'"), element, problem_id); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.Failure", error); free(error); return; } g_dbus_method_invocation_return_value(invocation, NULL); return; } if (g_strcmp0(method_name, "DeleteProblem") == 0) { /* Dbus parameters are always tuples. * In this case, it's (as) - a tuple of one element (array of strings). * Need to fetch the array: */ GVariant *array = g_variant_get_child_value(parameters, 0); GList *problem_dirs = string_list_from_variant(array); g_variant_unref(array); for (GList *l = problem_dirs; l; l = l->next) { const char *dir_name = (const char*)l->data; log_notice("dir_name:'%s'", dir_name); if (!allowed_problem_dir(dir_name)) { return_InvalidProblemDir_error(invocation, dir_name); goto ret; } } for (GList *l = problem_dirs; l; l = l->next) { const char *dir_name = (const char*)l->data; int dir_fd = dd_openfd(dir_name); if (dir_fd < 0) { perror_msg("can't open problem directory '%s'", dir_name); return_InvalidProblemDir_error(invocation, dir_name); return; } if (!fdump_dir_accessible_by_uid(dir_fd, caller_uid)) { if (errno == ENOTDIR) { log_notice("Requested directory does not exist '%s'", dir_name); close(dir_fd); continue; } if (polkit_check_authorization_dname(caller, "org.freedesktop.problems.getall") != PolkitYes) { // if user didn't provide correct credentials, just move to the next dir close(dir_fd); continue; } } struct dump_dir *dd = dd_fdopendir(dir_fd, dir_name, /*flags:*/ 0); if (dd) { if (dd_delete(dd) != 0) { error_msg("Failed to delete problem directory '%s'", dir_name); dd_close(dd); } } } g_dbus_method_invocation_return_value(invocation, NULL); ret: list_free_with_free(problem_dirs); return; } if (g_strcmp0(method_name, "FindProblemByElementInTimeRange") == 0) { const gchar *element; const gchar *value; glong timestamp_from; glong timestamp_to; gboolean all; g_variant_get_child(parameters, 0, "&s", &element); g_variant_get_child(parameters, 1, "&s", &value); 
g_variant_get_child(parameters, 2, "x", &timestamp_from); g_variant_get_child(parameters, 3, "x", &timestamp_to); g_variant_get_child(parameters, 4, "b", &all); if (!str_is_correct_filename(element)) { log_notice("'%s' is not a valid element name", element); char *error = xasprintf(_("'%s' is not a valid element name"), element); g_dbus_method_invocation_return_dbus_error(invocation, "org.freedesktop.problems.InvalidElement", error); free(error); return; } if (all && polkit_check_authorization_dname(caller, "org.freedesktop.problems.getall") == PolkitYes) caller_uid = 0; GList *dirs = get_problem_dirs_for_element_in_time(caller_uid, element, value, timestamp_from, timestamp_to); response = variant_from_string_list(dirs); list_free_with_free(dirs); g_dbus_method_invocation_return_value(invocation, response); return; } if (g_strcmp0(method_name, "Quit") == 0) { g_dbus_method_invocation_return_value(invocation, NULL); g_main_loop_quit(loop); return; } }
{ "deleted": [], "added": [ { "line_no": 259, "char_start": 9073, "char_end": 9119, "line": " if (!allowed_problem_dir(problem_id))\n" }, { "line_no": 260, "char_start": 9119, "char_end": 9129, "line": " {\n" }, { "line_no": 261, "char_start": 9129, "char_end": 9197, "line": " return_InvalidProblemDir_error(invocation, problem_id);\n" }, { "line_no": 262, "char_start": 9197, "char_end": 9217, "line": " return;\n" }, { "line_no": 263, "char_start": 9217, "char_end": 9227, "line": " }\n" }, { "line_no": 264, "char_start": 9227, "char_end": 9228, "line": "\n" }, { "line_no": 324, "char_start": 11714, "char_end": 11760, "line": " if (!allowed_problem_dir(problem_id))\n" }, { "line_no": 325, "char_start": 11760, "char_end": 11770, "line": " {\n" }, { "line_no": 326, "char_start": 11770, "char_end": 11838, "line": " return_InvalidProblemDir_error(invocation, problem_id);\n" }, { "line_no": 327, "char_start": 11838, "char_end": 11858, "line": " return;\n" }, { "line_no": 328, "char_start": 11858, "char_end": 11868, "line": " }\n" }, { "line_no": 329, "char_start": 11868, "char_end": 11869, "line": "\n" }, { "line_no": 447, "char_start": 16070, "char_end": 16117, "line": " if (!str_is_correct_filename(element))\n" }, { "line_no": 448, "char_start": 16117, "char_end": 16127, "line": " {\n" }, { "line_no": 449, "char_start": 16127, "char_end": 16196, "line": " log_notice(\"'%s' is not a valid element name\", element);\n" }, { "line_no": 450, "char_start": 16196, "char_end": 16281, "line": " char *error = xasprintf(_(\"'%s' is not a valid element name\"), element);\n" }, { "line_no": 451, "char_start": 16281, "char_end": 16348, "line": " g_dbus_method_invocation_return_dbus_error(invocation,\n" }, { "line_no": 452, "char_start": 16348, "char_end": 16437, "line": " \"org.freedesktop.problems.InvalidElement\",\n" }, { "line_no": 453, "char_start": 16437, "char_end": 16491, "line": " error);\n" }, { "line_no": 454, "char_start": 16491, "char_end": 16492, "line": "\n" }, { 
"line_no": 455, "char_start": 16492, "char_end": 16517, "line": " free(error);\n" }, { "line_no": 456, "char_start": 16517, "char_end": 16537, "line": " return;\n" }, { "line_no": 457, "char_start": 16537, "char_end": 16547, "line": " }\n" }, { "line_no": 458, "char_start": 16547, "char_end": 16548, "line": "\n" } ] }
{ "deleted": [], "added": [ { "char_start": 9086, "char_end": 9241, "chars": "allowed_problem_dir(problem_id))\n {\n return_InvalidProblemDir_error(invocation, problem_id);\n return;\n }\n\n if (!" }, { "char_start": 11712, "char_end": 11867, "chars": "\n\n if (!allowed_problem_dir(problem_id))\n {\n return_InvalidProblemDir_error(invocation, problem_id);\n return;\n }" }, { "char_start": 16068, "char_end": 16546, "chars": "\n\n if (!str_is_correct_filename(element))\n {\n log_notice(\"'%s' is not a valid element name\", element);\n char *error = xasprintf(_(\"'%s' is not a valid element name\"), element);\n g_dbus_method_invocation_return_dbus_error(invocation,\n \"org.freedesktop.problems.InvalidElement\",\n error);\n\n free(error);\n return;\n }" } ] }
github.com/abrt/abrt/commit/7a47f57975be0d285a2f20758e4572dca6d9cdd3
src/dbus/abrt-dbus.c
cwe-022
_inject_file_into_fs
def _inject_file_into_fs(fs, path, contents): absolute_path = os.path.join(fs, path.lstrip('/')) parent_dir = os.path.dirname(absolute_path) utils.execute('mkdir', '-p', parent_dir, run_as_root=True) utils.execute('tee', absolute_path, process_input=contents, run_as_root=True)
def _inject_file_into_fs(fs, path, contents, append=False): absolute_path = _join_and_check_path_within_fs(fs, path.lstrip('/')) parent_dir = os.path.dirname(absolute_path) utils.execute('mkdir', '-p', parent_dir, run_as_root=True) args = [] if append: args.append('-a') args.append(absolute_path) kwargs = dict(process_input=contents, run_as_root=True) utils.execute('tee', *args, **kwargs)
{ "deleted": [ { "line_no": 5, "char_start": 212, "char_end": 276, "line": " utils.execute('tee', absolute_path, process_input=contents,\n" }, { "line_no": 6, "char_start": 276, "char_end": 303, "line": " run_as_root=True)\n" } ], "added": [ { "line_no": 1, "char_start": 0, "char_end": 60, "line": "def _inject_file_into_fs(fs, path, contents, append=False):\n" }, { "line_no": 2, "char_start": 60, "char_end": 133, "line": " absolute_path = _join_and_check_path_within_fs(fs, path.lstrip('/'))\n" }, { "line_no": 3, "char_start": 133, "char_end": 134, "line": "\n" }, { "line_no": 6, "char_start": 245, "char_end": 246, "line": "\n" }, { "line_no": 7, "char_start": 246, "char_end": 260, "line": " args = []\n" }, { "line_no": 8, "char_start": 260, "char_end": 275, "line": " if append:\n" }, { "line_no": 9, "char_start": 275, "char_end": 301, "line": " args.append('-a')\n" }, { "line_no": 10, "char_start": 301, "char_end": 332, "line": " args.append(absolute_path)\n" }, { "line_no": 11, "char_start": 332, "char_end": 333, "line": "\n" }, { "line_no": 12, "char_start": 333, "char_end": 393, "line": " kwargs = dict(process_input=contents, run_as_root=True)\n" }, { "line_no": 13, "char_start": 393, "char_end": 394, "line": "\n" }, { "line_no": 14, "char_start": 394, "char_end": 435, "line": " utils.execute('tee', *args, **kwargs)\n" } ] }
{ "deleted": [ { "char_start": 67, "char_end": 69, "chars": "s." }, { "char_start": 73, "char_end": 76, "chars": ".jo" }, { "char_start": 216, "char_end": 218, "chars": "ut" }, { "char_start": 219, "char_end": 220, "chars": "l" }, { "char_start": 222, "char_end": 224, "chars": "ex" }, { "char_start": 225, "char_end": 229, "chars": "cute" }, { "char_start": 231, "char_end": 234, "chars": "tee" }, { "char_start": 235, "char_end": 236, "chars": "," }, { "char_start": 250, "char_end": 251, "chars": "," }, { "char_start": 275, "char_end": 285, "chars": "\n " } ], "added": [ { "char_start": 43, "char_end": 57, "chars": ", append=False" }, { "char_start": 80, "char_end": 82, "chars": "_j" }, { "char_start": 83, "char_end": 96, "chars": "in_and_check_" }, { "char_start": 100, "char_end": 105, "chars": "_with" }, { "char_start": 107, "char_end": 110, "chars": "_fs" }, { "char_start": 133, "char_end": 134, "chars": "\n" }, { "char_start": 245, "char_end": 246, "chars": "\n" }, { "char_start": 250, "char_end": 264, "chars": "args = []\n " }, { "char_start": 265, "char_end": 286, "chars": "f append:\n arg" }, { "char_start": 288, "char_end": 291, "chars": "app" }, { "char_start": 292, "char_end": 294, "chars": "nd" }, { "char_start": 296, "char_end": 298, "chars": "-a" }, { "char_start": 299, "char_end": 304, "chars": ")\n " }, { "char_start": 306, "char_end": 318, "chars": "rgs.append(a" }, { "char_start": 330, "char_end": 333, "chars": ")\n\n" }, { "char_start": 334, "char_end": 351, "chars": " kwargs = dict(" }, { "char_start": 392, "char_end": 435, "chars": "\n\n utils.execute('tee', *args, **kwargs)" } ] }
github.com/openstack/nova/commit/2427d4a99bed35baefd8f17ba422cb7aae8dcca7
nova/virt/disk/api.py
cwe-022
set_interface_var
set_interface_var(const char *iface, const char *var, const char *name, uint32_t val) { FILE *fp; char spath[64+IFNAMSIZ]; /* XXX: magic constant */ if (snprintf(spath, sizeof(spath), var, iface) >= sizeof(spath)) return -1; if (access(spath, F_OK) != 0) return -1; fp = fopen(spath, "w"); if (!fp) { if (name) flog(LOG_ERR, "failed to set %s (%u) for %s: %s", name, val, iface, strerror(errno)); return -1; } fprintf(fp, "%u", val); fclose(fp); return 0; }
set_interface_var(const char *iface, const char *var, const char *name, uint32_t val) { FILE *fp; char spath[64+IFNAMSIZ]; /* XXX: magic constant */ if (snprintf(spath, sizeof(spath), var, iface) >= sizeof(spath)) return -1; /* No path traversal */ if (strstr(name, "..") || strchr(name, '/')) return -1; if (access(spath, F_OK) != 0) return -1; fp = fopen(spath, "w"); if (!fp) { if (name) flog(LOG_ERR, "failed to set %s (%u) for %s: %s", name, val, iface, strerror(errno)); return -1; } fprintf(fp, "%u", val); fclose(fp); return 0; }
{ "deleted": [], "added": [ { "line_no": 10, "char_start": 239, "char_end": 264, "line": "\t/* No path traversal */\n" }, { "line_no": 11, "char_start": 264, "char_end": 310, "line": "\tif (strstr(name, \"..\") || strchr(name, '/'))\n" }, { "line_no": 12, "char_start": 310, "char_end": 323, "line": "\t\treturn -1;\n" }, { "line_no": 13, "char_start": 323, "char_end": 324, "line": "\n" } ] }
{ "deleted": [], "added": [ { "char_start": 240, "char_end": 325, "chars": "/* No path traversal */\n\tif (strstr(name, \"..\") || strchr(name, '/'))\n\t\treturn -1;\n\n\t" } ] }
github.com/reubenhwk/radvd/commit/92e22ca23e52066da2258df8c76a2dca8a428bcc
device-linux.c
cwe-022
cut
def cut(self, key): try: self.etcd.delete(os.path.join(self.namespace, key)) except etcd.EtcdKeyNotFound: return False except etcd.EtcdException as err: log_error("Error removing key %s: [%r]" % (key, repr(err))) raise CSStoreError('Error occurred while trying to cut key') return True
def cut(self, key): try: self.etcd.delete(self._absolute_key(key)) except etcd.EtcdKeyNotFound: return False except etcd.EtcdException as err: log_error("Error removing key %s: [%r]" % (key, repr(err))) raise CSStoreError('Error occurred while trying to cut key') return True
{ "deleted": [ { "line_no": 3, "char_start": 37, "char_end": 101, "line": " self.etcd.delete(os.path.join(self.namespace, key))\n" } ], "added": [ { "line_no": 3, "char_start": 37, "char_end": 91, "line": " self.etcd.delete(self._absolute_key(key))\n" } ] }
{ "deleted": [ { "char_start": 66, "char_end": 79, "chars": "os.path.join(" }, { "char_start": 84, "char_end": 85, "chars": "n" }, { "char_start": 86, "char_end": 88, "chars": "me" }, { "char_start": 89, "char_end": 92, "chars": "pac" }, { "char_start": 93, "char_end": 95, "chars": ", " } ], "added": [ { "char_start": 71, "char_end": 72, "chars": "_" }, { "char_start": 73, "char_end": 79, "chars": "bsolut" }, { "char_start": 80, "char_end": 82, "chars": "_k" }, { "char_start": 83, "char_end": 85, "chars": "y(" } ] }
github.com/latchset/custodia/commit/785fc87f38b4811bc4ce43a0a9b2267ee7d500b4
custodia/store/etcdstore.py
cwe-022
wiki_handle_http_request
wiki_handle_http_request(HttpRequest *req) { HttpResponse *res = http_response_new(req); char *page = http_request_get_path_info(req); char *command = http_request_get_query_string(req); char *wikitext = ""; util_dehttpize(page); /* remove any encoding on the requested page name. */ if (!strcmp(page, "/")) { if (access("WikiHome", R_OK) != 0) wiki_redirect(res, "/WikiHome?create"); page = "/WikiHome"; } if (!strcmp(page, "/styles.css")) { /* Return CSS page */ http_response_set_content_type(res, "text/css"); http_response_printf(res, "%s", CssData); http_response_send(res); exit(0); } if (!strcmp(page, "/favicon.ico")) { /* Return favicon */ http_response_set_content_type(res, "image/ico"); http_response_set_data(res, FaviconData, FaviconDataLen); http_response_send(res); exit(0); } page = page + 1; /* skip slash */ if (!strncmp(page, "api/", 4)) { char *p; page += 4; for (p=page; *p != '\0'; p++) if (*p=='?') { *p ='\0'; break; } wiki_handle_rest_call(req, res, page); exit(0); } /* A little safety. issue a malformed request for any paths, * There shouldn't need to be any.. 
*/ if (strchr(page, '/')) { http_response_set_status(res, 404, "Not Found"); http_response_printf(res, "<html><body>404 Not Found</body></html>\n"); http_response_send(res); exit(0); } if (!strcmp(page, "Changes")) { wiki_show_changes_page(res); } else if (!strcmp(page, "ChangesRss")) { wiki_show_changes_page_rss(res); } else if (!strcmp(page, "Search")) { wiki_show_search_results_page(res, http_request_param_get(req, "expr")); } else if (!strcmp(page, "Create")) { if ( (wikitext = http_request_param_get(req, "title")) != NULL) { /* create page and redirect */ wiki_redirect(res, http_request_param_get(req, "title")); } else { /* show create page form */ wiki_show_create_page(res); } } else { /* TODO: dont blindly write wikitext data to disk */ if ( (wikitext = http_request_param_get(req, "wikitext")) != NULL) { file_write(page, wikitext); } if (access(page, R_OK) == 0) /* page exists */ { wikitext = file_read(page); if (!strcmp(command, "edit")) { /* print edit page */ wiki_show_edit_page(res, wikitext, page); } else { wiki_show_page(res, wikitext, page); } } else { if (!strcmp(command, "create")) { wiki_show_edit_page(res, NULL, page); } else { char buf[1024]; snprintf(buf, 1024, "%s?create", page); wiki_redirect(res, buf); } } } }
wiki_handle_http_request(HttpRequest *req) { HttpResponse *res = http_response_new(req); char *page = http_request_get_path_info(req); char *command = http_request_get_query_string(req); char *wikitext = ""; util_dehttpize(page); /* remove any encoding on the requested page name. */ if (!strcmp(page, "/")) { if (access("WikiHome", R_OK) != 0) wiki_redirect(res, "/WikiHome?create"); page = "/WikiHome"; } if (!strcmp(page, "/styles.css")) { /* Return CSS page */ http_response_set_content_type(res, "text/css"); http_response_printf(res, "%s", CssData); http_response_send(res); exit(0); } if (!strcmp(page, "/favicon.ico")) { /* Return favicon */ http_response_set_content_type(res, "image/ico"); http_response_set_data(res, FaviconData, FaviconDataLen); http_response_send(res); exit(0); } page = page + 1; /* skip slash */ if (!strncmp(page, "api/", 4)) { char *p; page += 4; for (p=page; *p != '\0'; p++) if (*p=='?') { *p ='\0'; break; } wiki_handle_rest_call(req, res, page); exit(0); } /* A little safety. issue a malformed request for any paths, * There shouldn't need to be any.. 
*/ if (!page_name_is_good(page)) { http_response_set_status(res, 404, "Not Found"); http_response_printf(res, "<html><body>404 Not Found</body></html>\n"); http_response_send(res); exit(0); } if (!strcmp(page, "Changes")) { wiki_show_changes_page(res); } else if (!strcmp(page, "ChangesRss")) { wiki_show_changes_page_rss(res); } else if (!strcmp(page, "Search")) { wiki_show_search_results_page(res, http_request_param_get(req, "expr")); } else if (!strcmp(page, "Create")) { if ( (wikitext = http_request_param_get(req, "title")) != NULL) { /* create page and redirect */ wiki_redirect(res, http_request_param_get(req, "title")); } else { /* show create page form */ wiki_show_create_page(res); } } else { /* TODO: dont blindly write wikitext data to disk */ if ( (wikitext = http_request_param_get(req, "wikitext")) != NULL) { file_write(page, wikitext); } if (access(page, R_OK) == 0) /* page exists */ { wikitext = file_read(page); if (!strcmp(command, "edit")) { /* print edit page */ wiki_show_edit_page(res, wikitext, page); } else { wiki_show_page(res, wikitext, page); } } else { if (!strcmp(command, "create")) { wiki_show_edit_page(res, NULL, page); } else { char buf[1024]; snprintf(buf, 1024, "%s?create", page); wiki_redirect(res, buf); } } } }
{ "deleted": [ { "line_no": 54, "char_start": 1350, "char_end": 1375, "line": " if (strchr(page, '/'))\n" } ], "added": [ { "line_no": 54, "char_start": 1350, "char_end": 1382, "line": " if (!page_name_is_good(page))\n" } ] }
{ "deleted": [ { "char_start": 1357, "char_end": 1362, "chars": "trchr" }, { "char_start": 1367, "char_end": 1372, "chars": ", '/'" } ], "added": [ { "char_start": 1356, "char_end": 1368, "chars": "!page_name_i" }, { "char_start": 1369, "char_end": 1374, "chars": "_good" } ] }
github.com/yarolig/didiwiki/commit/5e5c796617e1712905dc5462b94bd5e6c08d15ea
src/wiki.c
cwe-022
candidate_paths_for_url
def candidate_paths_for_url(self, url): for root, prefix in self.directories: if url.startswith(prefix): yield os.path.join(root, url[len(prefix):])
def candidate_paths_for_url(self, url): for root, prefix in self.directories: if url.startswith(prefix): path = os.path.join(root, url[len(prefix):]) if os.path.commonprefix((root, path)) == root: yield path
{ "deleted": [ { "line_no": 4, "char_start": 129, "char_end": 188, "line": " yield os.path.join(root, url[len(prefix):])\n" } ], "added": [ { "line_no": 4, "char_start": 129, "char_end": 190, "line": " path = os.path.join(root, url[len(prefix):])\n" }, { "line_no": 5, "char_start": 190, "char_end": 253, "line": " if os.path.commonprefix((root, path)) == root:\n" }, { "line_no": 6, "char_start": 253, "char_end": 283, "line": " yield path\n" } ] }
{ "deleted": [ { "char_start": 145, "char_end": 150, "chars": "yield" } ], "added": [ { "char_start": 145, "char_end": 151, "chars": "path =" }, { "char_start": 189, "char_end": 283, "chars": "\n if os.path.commonprefix((root, path)) == root:\n yield path" } ] }
github.com/evansd/whitenoise/commit/4d8a3ab1e97d7ddb18b3fa8b4909c92bad5529c6
whitenoise/base.py
cwe-022
updateKey
def updateKey(client): """Updates the contents of a key that already exists in our system. Returns an error if the specified key doesn't exist for the specified user. """ global NOT_FOUND global CREATED validateClient(client) client_pub_key = loadClientRSAKey(client) token_data = decodeRequestToken(request.data, client_pub_key) validateNewKeyData(token_data) # Use 'w' flag to replace existing key file with the new key data if os.path.isfile('keys/%s/%s.key' % (client, token_data['name'])): with open('keys/%s/%s.key' % (client, token_data['name']), 'w') as f: f.write(token_data['key']) else: raise FoxlockError(NOT_FOUND, "Key '%s' not found" % token_data['name']) return 'Key successfully updated', CREATED
def updateKey(client): """Updates the contents of a key that already exists in our system. Returns an error if the specified key doesn't exist for the specified user. """ global NOT_FOUND global CREATED validateClient(client) client_pub_key = loadClientRSAKey(client) token_data = decodeRequestToken(request.data, client_pub_key) validateNewKeyData(token_data) validateKeyName(token_data['name']) # Use 'w' flag to replace existing key file with the new key data if os.path.isfile('keys/%s/%s.key' % (client, token_data['name'])): with open('keys/%s/%s.key' % (client, token_data['name']), 'w') as f: f.write(token_data['key']) else: raise FoxlockError(NOT_FOUND, "Key '%s' not found" % token_data['name']) return 'Key successfully updated', CREATED
{ "deleted": [ { "line_no": 9, "char_start": 233, "char_end": 234, "line": "\n" } ], "added": [ { "line_no": 12, "char_start": 371, "char_end": 408, "line": "\tvalidateKeyName(token_data['name'])\n" } ] }
{ "deleted": [ { "char_start": 233, "char_end": 234, "chars": "\n" } ], "added": [ { "char_start": 369, "char_end": 406, "chars": ")\n\tvalidateKeyName(token_data['name']" } ] }
github.com/Mimickal/FoxLock/commit/7c665e556987f4e2c1a75e143a1e80ae066ad833
impl.py
cwe-022
create_basename_core
def create_basename_core(basename): try: basename = basename.casefold() except Exception: basename = basename.lower() basename = basename.replace(' ', '-') basename = re.sub(r'<[^>]*>', r'', basename) basename = re.sub(r'[^a-z0-9\-]', r'', basename) basename = re.sub(r'\-\-', r'-', basename) basename = urllib.parse.quote_plus(basename) return basename
def create_basename_core(basename): try: basename = basename.casefold() except Exception: basename = basename.lower() basename = re.sub(r'[ \./]', r'-', basename) basename = re.sub(r'<[^>]*>', r'', basename) basename = re.sub(r'[^a-z0-9\-]', r'', basename) basename = re.sub(r'\-\-', r'-', basename) basename = urllib.parse.quote_plus(basename) return basename
{ "deleted": [ { "line_no": 7, "char_start": 143, "char_end": 185, "line": " basename = basename.replace(' ', '-')\n" } ], "added": [ { "line_no": 7, "char_start": 143, "char_end": 192, "line": " basename = re.sub(r'[ \\./]', r'-', basename)\n" } ] }
{ "deleted": [ { "char_start": 158, "char_end": 165, "chars": "basenam" }, { "char_start": 168, "char_end": 175, "chars": "eplace(" } ], "added": [ { "char_start": 158, "char_end": 159, "chars": "r" }, { "char_start": 161, "char_end": 164, "chars": "sub" }, { "char_start": 165, "char_end": 166, "chars": "r" }, { "char_start": 167, "char_end": 168, "chars": "[" }, { "char_start": 169, "char_end": 173, "chars": "\\./]" }, { "char_start": 176, "char_end": 177, "chars": "r" }, { "char_start": 180, "char_end": 190, "chars": ", basename" } ] }
github.com/syegulalp/mercury/commit/3f7c7442fa49aec37577dbdb47ce11a848e7bd03
MeTal/core/utils.py
cwe-022
get
def get(self, key): try: result = self.etcd.get(os.path.join(self.namespace, key)) except etcd.EtcdException as err: log_error("Error fetching key %s: [%r]" % (key, repr(err))) raise CSStoreError('Error occurred while trying to get key') return result.value
def get(self, key): try: result = self.etcd.get(self._absolute_key(key)) except etcd.EtcdException as err: log_error("Error fetching key %s: [%r]" % (key, repr(err))) raise CSStoreError('Error occurred while trying to get key') return result.value
{ "deleted": [ { "line_no": 3, "char_start": 37, "char_end": 107, "line": " result = self.etcd.get(os.path.join(self.namespace, key))\n" } ], "added": [ { "line_no": 3, "char_start": 37, "char_end": 97, "line": " result = self.etcd.get(self._absolute_key(key))\n" } ] }
{ "deleted": [ { "char_start": 72, "char_end": 85, "chars": "os.path.join(" }, { "char_start": 90, "char_end": 91, "chars": "n" }, { "char_start": 92, "char_end": 94, "chars": "me" }, { "char_start": 95, "char_end": 98, "chars": "pac" }, { "char_start": 99, "char_end": 101, "chars": ", " } ], "added": [ { "char_start": 77, "char_end": 78, "chars": "_" }, { "char_start": 79, "char_end": 85, "chars": "bsolut" }, { "char_start": 86, "char_end": 88, "chars": "_k" }, { "char_start": 89, "char_end": 91, "chars": "y(" } ] }
github.com/latchset/custodia/commit/785fc87f38b4811bc4ce43a0a9b2267ee7d500b4
custodia/store/etcdstore.py
cwe-022
PHYSICALPATH_FUNC
PHYSICALPATH_FUNC(mod_alias_physical_handler) { plugin_data *p = p_d; int uri_len, basedir_len; char *uri_ptr; size_t k; if (buffer_is_empty(con->physical.path)) return HANDLER_GO_ON; mod_alias_patch_connection(srv, con, p); /* not to include the tailing slash */ basedir_len = buffer_string_length(con->physical.basedir); if ('/' == con->physical.basedir->ptr[basedir_len-1]) --basedir_len; uri_len = buffer_string_length(con->physical.path) - basedir_len; uri_ptr = con->physical.path->ptr + basedir_len; for (k = 0; k < p->conf.alias->used; k++) { data_string *ds = (data_string *)p->conf.alias->data[k]; int alias_len = buffer_string_length(ds->key); if (alias_len > uri_len) continue; if (buffer_is_empty(ds->key)) continue; if (0 == (con->conf.force_lowercase_filenames ? strncasecmp(uri_ptr, ds->key->ptr, alias_len) : strncmp(uri_ptr, ds->key->ptr, alias_len))) { /* matched */ buffer_copy_buffer(con->physical.basedir, ds->value); buffer_copy_buffer(srv->tmp_buf, ds->value); buffer_append_string(srv->tmp_buf, uri_ptr + alias_len); buffer_copy_buffer(con->physical.path, srv->tmp_buf); return HANDLER_GO_ON; } } /* not found */ return HANDLER_GO_ON; }
PHYSICALPATH_FUNC(mod_alias_physical_handler) { plugin_data *p = p_d; int uri_len, basedir_len; char *uri_ptr; size_t k; if (buffer_is_empty(con->physical.path)) return HANDLER_GO_ON; mod_alias_patch_connection(srv, con, p); /* not to include the tailing slash */ basedir_len = buffer_string_length(con->physical.basedir); if ('/' == con->physical.basedir->ptr[basedir_len-1]) --basedir_len; uri_len = buffer_string_length(con->physical.path) - basedir_len; uri_ptr = con->physical.path->ptr + basedir_len; for (k = 0; k < p->conf.alias->used; k++) { data_string *ds = (data_string *)p->conf.alias->data[k]; int alias_len = buffer_string_length(ds->key); if (alias_len > uri_len) continue; if (buffer_is_empty(ds->key)) continue; if (0 == (con->conf.force_lowercase_filenames ? strncasecmp(uri_ptr, ds->key->ptr, alias_len) : strncmp(uri_ptr, ds->key->ptr, alias_len))) { /* matched */ /* check for path traversal in url-path following alias if key * does not end in slash, but replacement value ends in slash */ if (uri_ptr[alias_len] == '.') { char *s = uri_ptr + alias_len + 1; if (*s == '.') ++s; if (*s == '/' || *s == '\0') { size_t vlen = buffer_string_length(ds->value); if (0 != alias_len && ds->key->ptr[alias_len-1] != '/' && 0 != vlen && ds->value->ptr[vlen-1] == '/') { con->http_status = 403; return HANDLER_FINISHED; } } } buffer_copy_buffer(con->physical.basedir, ds->value); buffer_copy_buffer(srv->tmp_buf, ds->value); buffer_append_string(srv->tmp_buf, uri_ptr + alias_len); buffer_copy_buffer(con->physical.path, srv->tmp_buf); return HANDLER_GO_ON; } } /* not found */ return HANDLER_GO_ON; }
{ "deleted": [], "added": [ { "line_no": 29, "char_start": 928, "char_end": 994, "line": "\t\t\t/* check for path traversal in url-path following alias if key\n" }, { "line_no": 30, "char_start": 994, "char_end": 1062, "line": "\t\t\t * does not end in slash, but replacement value ends in slash */\n" }, { "line_no": 31, "char_start": 1062, "char_end": 1098, "line": "\t\t\tif (uri_ptr[alias_len] == '.') {\n" }, { "line_no": 32, "char_start": 1098, "char_end": 1137, "line": "\t\t\t\tchar *s = uri_ptr + alias_len + 1;\n" }, { "line_no": 33, "char_start": 1137, "char_end": 1161, "line": "\t\t\t\tif (*s == '.') ++s;\n" }, { "line_no": 34, "char_start": 1161, "char_end": 1196, "line": "\t\t\t\tif (*s == '/' || *s == '\\0') {\n" }, { "line_no": 35, "char_start": 1196, "char_end": 1248, "line": "\t\t\t\t\tsize_t vlen = buffer_string_length(ds->value);\n" }, { "line_no": 36, "char_start": 1248, "char_end": 1308, "line": "\t\t\t\t\tif (0 != alias_len && ds->key->ptr[alias_len-1] != '/'\n" }, { "line_no": 37, "char_start": 1308, "char_end": 1366, "line": "\t\t\t\t\t && 0 != vlen && ds->value->ptr[vlen-1] == '/') {\n" }, { "line_no": 38, "char_start": 1366, "char_end": 1396, "line": "\t\t\t\t\t\tcon->http_status = 403;\n" }, { "line_no": 39, "char_start": 1396, "char_end": 1427, "line": "\t\t\t\t\t\treturn HANDLER_FINISHED;\n" }, { "line_no": 40, "char_start": 1427, "char_end": 1434, "line": "\t\t\t\t\t}\n" }, { "line_no": 41, "char_start": 1434, "char_end": 1440, "line": "\t\t\t\t}\n" }, { "line_no": 42, "char_start": 1440, "char_end": 1445, "line": "\t\t\t}\n" }, { "line_no": 43, "char_start": 1445, "char_end": 1446, "line": "\n" } ] }
{ "deleted": [], "added": [ { "char_start": 931, "char_end": 1449, "chars": "/* check for path traversal in url-path following alias if key\n\t\t\t * does not end in slash, but replacement value ends in slash */\n\t\t\tif (uri_ptr[alias_len] == '.') {\n\t\t\t\tchar *s = uri_ptr + alias_len + 1;\n\t\t\t\tif (*s == '.') ++s;\n\t\t\t\tif (*s == '/' || *s == '\\0') {\n\t\t\t\t\tsize_t vlen = buffer_string_length(ds->value);\n\t\t\t\t\tif (0 != alias_len && ds->key->ptr[alias_len-1] != '/'\n\t\t\t\t\t && 0 != vlen && ds->value->ptr[vlen-1] == '/') {\n\t\t\t\t\t\tcon->http_status = 403;\n\t\t\t\t\t\treturn HANDLER_FINISHED;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t" } ] }
github.com/lighttpd/lighttpd1.4/commit/2105dae0f9d7a964375ce681e53cb165375f84c1
src/mod_alias.c
cwe-022
download_check_files
def download_check_files(self, filelist): # only admins and allowed users may download if not cherrypy.session['admin']: uo = self.useroptions.forUser(self.getUserId()) if not uo.getOptionValue('media.may_download'): return 'not_permitted' # make sure nobody tries to escape from basedir for f in filelist: if '/../' in f: return 'invalid_file' # make sure all files are smaller than maximum download size size_limit = cherry.config['media.maximum_download_size'] try: if self.model.file_size_within_limit(filelist, size_limit): return 'ok' else: return 'too_big' except OSError as e: # use OSError for python2 compatibility return str(e)
def download_check_files(self, filelist): # only admins and allowed users may download if not cherrypy.session['admin']: uo = self.useroptions.forUser(self.getUserId()) if not uo.getOptionValue('media.may_download'): return 'not_permitted' # make sure nobody tries to escape from basedir for f in filelist: # don't allow to traverse up in the file system if '/../' in f or f.startswith('../'): return 'invalid_file' # CVE-2015-8309: do not allow absolute file paths if os.path.isabs(f): return 'invalid_file' # make sure all files are smaller than maximum download size size_limit = cherry.config['media.maximum_download_size'] try: if self.model.file_size_within_limit(filelist, size_limit): return 'ok' else: return 'too_big' except OSError as e: # use OSError for python2 compatibility return str(e)
{ "deleted": [ { "line_no": 9, "char_start": 383, "char_end": 411, "line": " if '/../' in f:\n" } ], "added": [ { "line_no": 10, "char_start": 443, "char_end": 494, "line": " if '/../' in f or f.startswith('../'):\n" }, { "line_no": 11, "char_start": 494, "char_end": 532, "line": " return 'invalid_file'\n" }, { "line_no": 13, "char_start": 594, "char_end": 627, "line": " if os.path.isabs(f):\n" } ] }
{ "deleted": [], "added": [ { "char_start": 395, "char_end": 455, "chars": "# don't allow to traverse up in the file system\n " }, { "char_start": 469, "char_end": 625, "chars": " or f.startswith('../'):\n return 'invalid_file'\n # CVE-2015-8309: do not allow absolute file paths\n if os.path.isabs(f)" } ] }
github.com/devsnd/cherrymusic/commit/62dec34a1ea0741400dd6b6c660d303dcd651e86
cherrymusicserver/httphandler.py
cwe-022
get_files
def get_files(self, submit_id, password=None, astree=False): """ Returns files from a submitted analysis. @param password: The password to unlock container archives with @param astree: sflock option; determines the format in which the files are returned @return: A tree of files """ submit = Database().view_submit(submit_id) files, duplicates = [], [] for data in submit.data["data"]: if data["type"] == "file": filename = Storage.get_filename_from_path(data["data"]) filepath = os.path.join(submit.tmp_path, data["data"]) filedata = open(filepath, "rb").read() unpacked = sflock.unpack( filepath=filename, contents=filedata, password=password, duplicates=duplicates ) if astree: unpacked = unpacked.astree() files.append(unpacked) elif data["type"] == "url": files.append({ "filename": data["data"], "filepath": "", "relapath": "", "selected": True, "size": 0, "type": "url", "package": "ie", "extrpath": [], "duplicate": False, "children": [], "mime": "text/html", "finger": { "magic_human": "url", "magic": "url" } }) else: raise RuntimeError( "Unknown data entry type: %s" % data["type"] ) return { "files": files, "path": submit.tmp_path, }
def get_files(self, submit_id, password=None, astree=False): """ Returns files from a submitted analysis. @param password: The password to unlock container archives with @param astree: sflock option; determines the format in which the files are returned @return: A tree of files """ submit = Database().view_submit(submit_id) files, duplicates = [], [] for data in submit.data["data"]: if data["type"] == "file": filename = Storage.get_filename_from_path(data["data"]) filepath = os.path.join(submit.tmp_path, filename) unpacked = sflock.unpack( filepath=filepath, password=password, duplicates=duplicates ) if astree: unpacked = unpacked.astree(sanitize=True) files.append(unpacked) elif data["type"] == "url": files.append({ "filename": data["data"], "filepath": "", "relapath": "", "selected": True, "size": 0, "type": "url", "package": "ie", "extrpath": [], "duplicate": False, "children": [], "mime": "text/html", "finger": { "magic_human": "url", "magic": "url" } }) else: raise RuntimeError( "Unknown data entry type: %s" % data["type"] ) return files
{ "deleted": [ { "line_no": 14, "char_start": 574, "char_end": 645, "line": " filepath = os.path.join(submit.tmp_path, data[\"data\"])\n" }, { "line_no": 15, "char_start": 645, "char_end": 700, "line": " filedata = open(filepath, \"rb\").read()\n" }, { "line_no": 18, "char_start": 743, "char_end": 801, "line": " filepath=filename, contents=filedata,\n" }, { "line_no": 19, "char_start": 801, "char_end": 862, "line": " password=password, duplicates=duplicates\n" }, { "line_no": 23, "char_start": 908, "char_end": 957, "line": " unpacked = unpacked.astree()\n" }, { "line_no": 49, "char_start": 1776, "char_end": 1793, "line": " return {\n" }, { "line_no": 50, "char_start": 1793, "char_end": 1821, "line": " \"files\": files,\n" }, { "line_no": 51, "char_start": 1821, "char_end": 1858, "line": " \"path\": submit.tmp_path,\n" }, { "line_no": 52, "char_start": 1858, "char_end": 1867, "line": " }\n" } ], "added": [ { "line_no": 14, "char_start": 574, "char_end": 641, "line": " filepath = os.path.join(submit.tmp_path, filename)\n" }, { "line_no": 17, "char_start": 684, "char_end": 764, "line": " filepath=filepath, password=password, duplicates=duplicates\n" }, { "line_no": 21, "char_start": 810, "char_end": 872, "line": " unpacked = unpacked.astree(sanitize=True)\n" }, { "line_no": 47, "char_start": 1691, "char_end": 1711, "line": " return files\n" } ] }
{ "deleted": [ { "char_start": 631, "char_end": 661, "chars": "data[\"data\"])\n " }, { "char_start": 665, "char_end": 675, "chars": "data = ope" }, { "char_start": 676, "char_end": 682, "chars": "(filep" }, { "char_start": 683, "char_end": 694, "chars": "th, \"rb\").r" }, { "char_start": 695, "char_end": 698, "chars": "ad(" }, { "char_start": 776, "char_end": 796, "chars": "name, contents=filed" }, { "char_start": 798, "char_end": 799, "chars": "a" }, { "char_start": 800, "char_end": 820, "chars": "\n " }, { "char_start": 1791, "char_end": 1806, "chars": "{\n \"" }, { "char_start": 1811, "char_end": 1867, "chars": "\": files,\n \"path\": submit.tmp_path,\n }" } ], "added": [ { "char_start": 637, "char_end": 638, "chars": "m" }, { "char_start": 717, "char_end": 718, "chars": "p" }, { "char_start": 720, "char_end": 721, "chars": "h" }, { "char_start": 857, "char_end": 870, "chars": "sanitize=True" } ] }
github.com/cuckoosandbox/cuckoo/commit/168cabf86730d56b7fa319278bf0f0034052666a
cuckoo/core/submit.py
cwe-022
handle
def handle(self, keepalive=True, initial_timeout=None): # we are requested to skip processing and keep the previous values if self.skip: return self.response.handle() # default to no keepalive in case something happens while even trying ensure we have a request self.keepalive = False self.headers = HTTPHeaders() # if initial_timeout is set, only wait that long for the initial request line if initial_timeout: self.connection.settimeout(initial_timeout) else: self.connection.settimeout(self.timeout) # get request line try: # ignore empty lines waiting on request request = '\r\n' while request == '\r\n': request = self.rfile.readline(max_line_size + 1).decode(http_encoding) # if read hits timeout or has some other error, ignore the request except Exception: return True # ignore empty requests if not request: return True # we have a request, go back to normal timeout if initial_timeout: self.connection.settimeout(self.timeout) # remove \r\n from the end self.request_line = request[:-2] # set some reasonable defaults in case the worst happens and we need to tell the client self.method = '' self.resource = '/' try: # HTTP Status 414 if len(request) > max_line_size: raise HTTPError(414) # HTTP Status 400 if request[-2:] != '\r\n': raise HTTPError(400) # try the request line and error out if can't parse it try: self.method, self.resource, self.request_http = self.request_line.split() # HTTP Status 400 except ValueError: raise HTTPError(400) # HTTP Status 505 if self.request_http != http_version: raise HTTPError(505) # read and parse request headers while True: line = self.rfile.readline(max_line_size + 1).decode(http_encoding) # hit end of headers if line == '\r\n': break self.headers.add(line) # if we are requested to close the connection after we finish, do so if self.headers.get('Connection') == 'close': self.keepalive = False # else since we are sure we have a request and have read all of the request data, keepalive for more later (if allowed) else: self.keepalive = 
keepalive # find a matching regex to handle the request with for regex, handler in self.server.routes.items(): match = regex.match(self.resource) if match: # create a dictionary of groups groups = match.groupdict() values = groups.values() for idx, group in enumerate(match.groups()): if group not in values: groups[idx] = group # create handler self.handler = handler(self, self.response, groups) break # HTTP Status 404 # if loop is not broken (handler is not found), raise a 404 else: raise HTTPError(404) # use DummyHandler so the error is raised again when ready for response except Exception as error: self.handler = DummyHandler(self, self.response, (), error) finally: # we finished listening and handling early errors and so let a response class now finish up the job of talking return self.response.handle()
def handle(self, keepalive=True, initial_timeout=None): # we are requested to skip processing and keep the previous values if self.skip: return self.response.handle() # default to no keepalive in case something happens while even trying ensure we have a request self.keepalive = False self.headers = HTTPHeaders() # if initial_timeout is set, only wait that long for the initial request line if initial_timeout: self.connection.settimeout(initial_timeout) else: self.connection.settimeout(self.timeout) # get request line try: # ignore empty lines waiting on request request = '\r\n' while request == '\r\n': request = self.rfile.readline(max_line_size + 1).decode(http_encoding) # if read hits timeout or has some other error, ignore the request except Exception: return True # ignore empty requests if not request: return True # we have a request, go back to normal timeout if initial_timeout: self.connection.settimeout(self.timeout) # remove \r\n from the end self.request_line = request[:-2] # set some reasonable defaults in case the worst happens and we need to tell the client self.method = '' self.resource = '/' try: # HTTP Status 414 if len(request) > max_line_size: raise HTTPError(414) # HTTP Status 400 if request[-2:] != '\r\n': raise HTTPError(400) # try the request line and error out if can't parse it try: self.method, resource, self.request_http = self.request_line.split() self.resource = urllib.parse.unquote(resource) # HTTP Status 400 except ValueError: raise HTTPError(400) # HTTP Status 505 if self.request_http != http_version: raise HTTPError(505) # read and parse request headers while True: line = self.rfile.readline(max_line_size + 1).decode(http_encoding) # hit end of headers if line == '\r\n': break self.headers.add(line) # if we are requested to close the connection after we finish, do so if self.headers.get('Connection') == 'close': self.keepalive = False # else since we are sure we have a request and have read all of the request data, keepalive for more 
later (if allowed) else: self.keepalive = keepalive # find a matching regex to handle the request with for regex, handler in self.server.routes.items(): match = regex.match(self.resource) if match: # create a dictionary of groups groups = match.groupdict() values = groups.values() for idx, group in enumerate(match.groups()): if group not in values: groups[idx] = group # create handler self.handler = handler(self, self.response, groups) break # HTTP Status 404 # if loop is not broken (handler is not found), raise a 404 else: raise HTTPError(404) # use DummyHandler so the error is raised again when ready for response except Exception as error: self.handler = DummyHandler(self, self.response, (), error) finally: # we finished listening and handling early errors and so let a response class now finish up the job of talking return self.response.handle()
{ "deleted": [ { "line_no": 53, "char_start": 1744, "char_end": 1834, "line": " self.method, self.resource, self.request_http = self.request_line.split()\n" } ], "added": [ { "line_no": 53, "char_start": 1744, "char_end": 1829, "line": " self.method, resource, self.request_http = self.request_line.split()\n" }, { "line_no": 54, "char_start": 1829, "char_end": 1892, "line": " self.resource = urllib.parse.unquote(resource)\n" } ] }
{ "deleted": [ { "char_start": 1773, "char_end": 1778, "chars": "self." } ], "added": [ { "char_start": 1827, "char_end": 1890, "chars": ")\n self.resource = urllib.parse.unquote(resource" } ] }
github.com/fkmclane/python-fooster-web/commit/80202a6d3788ad1212a162d19785c600025e6aa4
fooster/web/web.py
cwe-022
dd_delete_item
int dd_delete_item(struct dump_dir *dd, const char *name) { if (!dd->locked) error_msg_and_die("dump_dir is not opened"); /* bug */ char *path = concat_path_file(dd->dd_dirname, name); int res = unlink(path); if (res < 0) { if (errno == ENOENT) errno = res = 0; else perror_msg("Can't delete file '%s'", path); } free(path); return res; }
int dd_delete_item(struct dump_dir *dd, const char *name) { if (!dd->locked) error_msg_and_die("dump_dir is not opened"); /* bug */ if (!str_is_correct_filename(name)) error_msg_and_die("Cannot delete item. '%s' is not a valid file name", name); char *path = concat_path_file(dd->dd_dirname, name); int res = unlink(path); if (res < 0) { if (errno == ENOENT) errno = res = 0; else perror_msg("Can't delete file '%s'", path); } free(path); return res; }
{ "deleted": [], "added": [ { "line_no": 6, "char_start": 145, "char_end": 185, "line": " if (!str_is_correct_filename(name))\n" }, { "line_no": 7, "char_start": 185, "char_end": 271, "line": " error_msg_and_die(\"Cannot delete item. '%s' is not a valid file name\", name);\n" }, { "line_no": 8, "char_start": 271, "char_end": 272, "line": "\n" } ] }
{ "deleted": [], "added": [ { "char_start": 149, "char_end": 276, "chars": "if (!str_is_correct_filename(name))\n error_msg_and_die(\"Cannot delete item. '%s' is not a valid file name\", name);\n\n " } ] }
github.com/abrt/libreport/commit/239c4f7d1f47265526b39ad70106767d00805277
src/lib/dump_dir.c
cwe-022
render
def render(self, request): action = "download" if "action" in request.args: action = request.args["action"][0] if "file" in request.args: filename = request.args["file"][0].decode('utf-8', 'ignore').encode('utf-8') filename = re.sub("^/+", "/", os.path.realpath(filename)) if not os.path.exists(filename): return "File '%s' not found" % (filename) if action == "stream": name = "stream" if "name" in request.args: name = request.args["name"][0] port = config.OpenWebif.port.value proto = 'http' if request.isSecure(): port = config.OpenWebif.https_port.value proto = 'https' ourhost = request.getHeader('host') m = re.match('.+\:(\d+)$', ourhost) if m is not None: port = m.group(1) response = "#EXTM3U\n#EXTVLCOPT--http-reconnect=true\n#EXTINF:-1,%s\n%s://%s:%s/file?action=download&file=%s" % (name, proto, request.getRequestHostname(), port, quote(filename)) request.setHeader("Content-Disposition", 'attachment;filename="%s.m3u"' % name) request.setHeader("Content-Type", "application/x-mpegurl") return response elif action == "delete": request.setResponseCode(http.OK) return "TODO: DELETE FILE: %s" % (filename) elif action == "download": request.setHeader("Content-Disposition", "attachment;filename=\"%s\"" % (filename.split('/')[-1])) rfile = static.File(filename, defaultType = "application/octet-stream") return rfile.render(request) else: return "wrong action parameter" if "dir" in request.args: path = request.args["dir"][0] pattern = '*' data = [] if "pattern" in request.args: pattern = request.args["pattern"][0] directories = [] files = [] if fileExists(path): try: files = glob.glob(path+'/'+pattern) except: files = [] files.sort() tmpfiles = files[:] for x in tmpfiles: if os.path.isdir(x): directories.append(x + '/') files.remove(x) data.append({"result": True,"dirs": directories,"files": files}) else: data.append({"result": False,"message": "path %s not exits" % (path)}) request.setHeader("content-type", "application/json; charset=utf-8") return json.dumps(data, 
indent=2)
def render(self, request): action = "download" if "action" in request.args: action = request.args["action"][0] if "file" in request.args: filename = lenient_force_utf_8(request.args["file"][0]) filename = sanitise_filename_slashes(os.path.realpath(filename)) if not os.path.exists(filename): return "File '%s' not found" % (filename) if action == "stream": name = "stream" if "name" in request.args: name = request.args["name"][0] port = config.OpenWebif.port.value proto = 'http' if request.isSecure(): port = config.OpenWebif.https_port.value proto = 'https' ourhost = request.getHeader('host') m = re.match('.+\:(\d+)$', ourhost) if m is not None: port = m.group(1) response = "#EXTM3U\n#EXTVLCOPT--http-reconnect=true\n#EXTINF:-1,%s\n%s://%s:%s/file?action=download&file=%s" % (name, proto, request.getRequestHostname(), port, quote(filename)) request.setHeader("Content-Disposition", 'attachment;filename="%s.m3u"' % name) request.setHeader("Content-Type", "application/x-mpegurl") return response elif action == "delete": request.setResponseCode(http.OK) return "TODO: DELETE FILE: %s" % (filename) elif action == "download": request.setHeader("Content-Disposition", "attachment;filename=\"%s\"" % (filename.split('/')[-1])) rfile = static.File(filename, defaultType = "application/octet-stream") return rfile.render(request) else: return "wrong action parameter" if "dir" in request.args: path = request.args["dir"][0] pattern = '*' data = [] if "pattern" in request.args: pattern = request.args["pattern"][0] directories = [] files = [] if fileExists(path): try: files = glob.glob(path+'/'+pattern) except: files = [] files.sort() tmpfiles = files[:] for x in tmpfiles: if os.path.isdir(x): directories.append(x + '/') files.remove(x) data.append({"result": True,"dirs": directories,"files": files}) else: data.append({"result": False,"message": "path %s not exits" % (path)}) request.setHeader("content-type", "application/json; charset=utf-8") return json.dumps(data, indent=2)
{ "deleted": [ { "line_no": 7, "char_start": 149, "char_end": 229, "line": "\t\t\tfilename = request.args[\"file\"][0].decode('utf-8', 'ignore').encode('utf-8')\n" }, { "line_no": 8, "char_start": 229, "char_end": 290, "line": "\t\t\tfilename = re.sub(\"^/+\", \"/\", os.path.realpath(filename))\n" } ], "added": [ { "line_no": 7, "char_start": 149, "char_end": 208, "line": "\t\t\tfilename = lenient_force_utf_8(request.args[\"file\"][0])\n" }, { "line_no": 8, "char_start": 208, "char_end": 276, "line": "\t\t\tfilename = sanitise_filename_slashes(os.path.realpath(filename))\n" } ] }
{ "deleted": [ { "char_start": 186, "char_end": 227, "chars": ".decode('utf-8', 'ignore').encode('utf-8'" }, { "char_start": 243, "char_end": 244, "chars": "r" }, { "char_start": 245, "char_end": 246, "chars": "." }, { "char_start": 247, "char_end": 249, "chars": "ub" }, { "char_start": 250, "char_end": 262, "chars": "\"^/+\", \"/\", " } ], "added": [ { "char_start": 163, "char_end": 183, "chars": "lenient_force_utf_8(" }, { "char_start": 222, "char_end": 238, "chars": "sanitise_filenam" }, { "char_start": 239, "char_end": 240, "chars": "_" }, { "char_start": 241, "char_end": 247, "chars": "lashes" } ] }
github.com/E2OpenPlugins/e2openplugin-OpenWebif/commit/a846b7664eda3a4c51a452e00638cf7337dc2013
plugin/controllers/file.py
cwe-022
_inject_net_into_fs
def _inject_net_into_fs(net, fs, execute=None): """Inject /etc/network/interfaces into the filesystem rooted at fs. net is the contents of /etc/network/interfaces. """ netdir = os.path.join(os.path.join(fs, 'etc'), 'network') utils.execute('mkdir', '-p', netdir, run_as_root=True) utils.execute('chown', 'root:root', netdir, run_as_root=True) utils.execute('chmod', 755, netdir, run_as_root=True) netfile = os.path.join(netdir, 'interfaces') utils.execute('tee', netfile, process_input=net, run_as_root=True)
def _inject_net_into_fs(net, fs, execute=None): """Inject /etc/network/interfaces into the filesystem rooted at fs. net is the contents of /etc/network/interfaces. """ netdir = _join_and_check_path_within_fs(fs, 'etc', 'network') utils.execute('mkdir', '-p', netdir, run_as_root=True) utils.execute('chown', 'root:root', netdir, run_as_root=True) utils.execute('chmod', 755, netdir, run_as_root=True) netfile = os.path.join('etc', 'network', 'interfaces') _inject_file_into_fs(fs, netfile, net)
{ "deleted": [ { "line_no": 6, "char_start": 181, "char_end": 243, "line": " netdir = os.path.join(os.path.join(fs, 'etc'), 'network')\n" }, { "line_no": 10, "char_start": 426, "char_end": 475, "line": " netfile = os.path.join(netdir, 'interfaces')\n" }, { "line_no": 11, "char_start": 475, "char_end": 545, "line": " utils.execute('tee', netfile, process_input=net, run_as_root=True)\n" } ], "added": [ { "line_no": 6, "char_start": 181, "char_end": 247, "line": " netdir = _join_and_check_path_within_fs(fs, 'etc', 'network')\n" }, { "line_no": 10, "char_start": 430, "char_end": 431, "line": "\n" }, { "line_no": 11, "char_start": 431, "char_end": 490, "line": " netfile = os.path.join('etc', 'network', 'interfaces')\n" }, { "line_no": 12, "char_start": 490, "char_end": 532, "line": " _inject_file_into_fs(fs, netfile, net)\n" } ] }
{ "deleted": [ { "char_start": 194, "char_end": 202, "chars": "os.path." }, { "char_start": 206, "char_end": 210, "chars": "(os." }, { "char_start": 214, "char_end": 217, "chars": ".jo" }, { "char_start": 229, "char_end": 230, "chars": ")" }, { "char_start": 456, "char_end": 458, "chars": "di" }, { "char_start": 479, "char_end": 480, "chars": "u" }, { "char_start": 483, "char_end": 485, "chars": "s." }, { "char_start": 486, "char_end": 490, "chars": "xecu" }, { "char_start": 491, "char_end": 492, "chars": "e" }, { "char_start": 493, "char_end": 498, "chars": "'tee'" }, { "char_start": 509, "char_end": 523, "chars": "process_input=" }, { "char_start": 526, "char_end": 544, "chars": ", run_as_root=True" } ], "added": [ { "char_start": 194, "char_end": 196, "chars": "_j" }, { "char_start": 197, "char_end": 210, "chars": "in_and_check_" }, { "char_start": 214, "char_end": 216, "chars": "_w" }, { "char_start": 221, "char_end": 224, "chars": "_fs" }, { "char_start": 430, "char_end": 431, "chars": "\n" }, { "char_start": 458, "char_end": 466, "chars": "'etc', '" }, { "char_start": 469, "char_end": 471, "chars": "wo" }, { "char_start": 472, "char_end": 474, "chars": "k'" }, { "char_start": 494, "char_end": 495, "chars": "_" }, { "char_start": 496, "char_end": 498, "chars": "nj" }, { "char_start": 501, "char_end": 505, "chars": "_fil" }, { "char_start": 506, "char_end": 509, "chars": "_in" }, { "char_start": 510, "char_end": 517, "chars": "o_fs(fs" } ] }
github.com/openstack/nova/commit/2427d4a99bed35baefd8f17ba422cb7aae8dcca7
nova/virt/disk/api.py
cwe-022
canonicalize
def canonicalize(self): """:: path = path.canonicalize() Canonicalize path. :: # "/foo/baz" Pyjo.Path.new('/foo/./bar/../baz').canonicalize() # "/../baz" Pyjo.Path.new('/foo/../bar/../../baz').canonicalize() """ parts = self.parts i = 0 while i < len(parts): if parts[i] == '.' or parts[i] == '': parts.pop(i) elif i < 1 or parts[i] != '..' or parts[i - 1] == '..': i += 1 else: i -= 1 parts.pop(i) parts.pop(i) if not parts: self.trailing_slash = False return self
def canonicalize(self): """:: path = path.canonicalize() Canonicalize path by resolving ``.`` and ``..``, in addition ``...`` will be treated as ``.`` to protect from path traversal attacks. # "/foo/baz" Pyjo.Path.new('/foo/./bar/../baz').canonicalize() # "/../baz" Pyjo.Path.new('/foo/../bar/../../baz').canonicalize() # "/foo/bar" Pyjo.Path.new('/foo/.../bar').canonicalize() """ parts = self.parts i = 0 while i < len(parts): if parts[i] == '' or parts[i] == '.' or parts[i] == '...': parts.pop(i) elif i < 1 or parts[i] != '..' or parts[i - 1] == '..': i += 1 else: i -= 1 parts.pop(i) parts.pop(i) if not parts: self.trailing_slash = False return self
{ "deleted": [ { "line_no": 6, "char_start": 83, "char_end": 113, "line": " Canonicalize path. ::\n" }, { "line_no": 17, "char_start": 375, "char_end": 425, "line": " if parts[i] == '.' or parts[i] == '':\n" } ], "added": [ { "line_no": 6, "char_start": 83, "char_end": 168, "line": " Canonicalize path by resolving ``.`` and ``..``, in addition ``...`` will be\n" }, { "line_no": 7, "char_start": 168, "char_end": 233, "line": " treated as ``.`` to protect from path traversal attacks.\n" }, { "line_no": 14, "char_start": 412, "char_end": 413, "line": "\n" }, { "line_no": 16, "char_start": 438, "char_end": 495, "line": " Pyjo.Path.new('/foo/.../bar').canonicalize()\n" }, { "line_no": 21, "char_start": 578, "char_end": 649, "line": " if parts[i] == '' or parts[i] == '.' or parts[i] == '...':\n" } ] }
{ "deleted": [ { "char_start": 110, "char_end": 112, "chars": "::" } ], "added": [ { "char_start": 108, "char_end": 124, "chars": " by resolving ``" }, { "char_start": 125, "char_end": 171, "chars": "`` and ``..``, in addition ``...`` will be\n " }, { "char_start": 172, "char_end": 232, "chars": " treated as ``.`` to protect from path traversal attacks." }, { "char_start": 411, "char_end": 494, "chars": "\n\n # \"/foo/bar\"\n Pyjo.Path.new('/foo/.../bar').canonicalize()" }, { "char_start": 606, "char_end": 624, "chars": "' or parts[i] == '" }, { "char_start": 643, "char_end": 646, "chars": "..." } ] }
github.com/dex4er/Pyjoyment/commit/e4b115bc80c41615b2133091af3a74ee5d995c2e
Pyjo/Path.py
cwe-022
CWebSock::GetSkinPath
CString CWebSock::GetSkinPath(const CString& sSkinName) { CString sRet = CZNC::Get().GetZNCPath() + "/webskins/" + sSkinName; if (!CFile::IsDir(sRet)) { sRet = CZNC::Get().GetCurPath() + "/webskins/" + sSkinName; if (!CFile::IsDir(sRet)) { sRet = CString(_SKINDIR_) + "/" + sSkinName; } } return sRet + "/"; }
CString CWebSock::GetSkinPath(const CString& sSkinName) { const CString sSkin = sSkinName.Replace_n("/", "_").Replace_n(".", "_"); CString sRet = CZNC::Get().GetZNCPath() + "/webskins/" + sSkin; if (!CFile::IsDir(sRet)) { sRet = CZNC::Get().GetCurPath() + "/webskins/" + sSkin; if (!CFile::IsDir(sRet)) { sRet = CString(_SKINDIR_) + "/" + sSkin; } } return sRet + "/"; }
{ "deleted": [ { "line_no": 2, "char_start": 58, "char_end": 130, "line": " CString sRet = CZNC::Get().GetZNCPath() + \"/webskins/\" + sSkinName;\n" }, { "line_no": 5, "char_start": 162, "char_end": 230, "line": " sRet = CZNC::Get().GetCurPath() + \"/webskins/\" + sSkinName;\n" }, { "line_no": 8, "char_start": 266, "char_end": 323, "line": " sRet = CString(_SKINDIR_) + \"/\" + sSkinName;\n" } ], "added": [ { "line_no": 2, "char_start": 58, "char_end": 135, "line": " const CString sSkin = sSkinName.Replace_n(\"/\", \"_\").Replace_n(\".\", \"_\");\n" }, { "line_no": 3, "char_start": 135, "char_end": 136, "line": "\n" }, { "line_no": 4, "char_start": 136, "char_end": 204, "line": " CString sRet = CZNC::Get().GetZNCPath() + \"/webskins/\" + sSkin;\n" }, { "line_no": 7, "char_start": 236, "char_end": 300, "line": " sRet = CZNC::Get().GetCurPath() + \"/webskins/\" + sSkin;\n" }, { "line_no": 10, "char_start": 336, "char_end": 389, "line": " sRet = CString(_SKINDIR_) + \"/\" + sSkin;\n" } ] }
{ "deleted": [ { "char_start": 124, "char_end": 128, "chars": "Name" }, { "char_start": 224, "char_end": 228, "chars": "Name" }, { "char_start": 317, "char_end": 321, "chars": "Name" } ], "added": [ { "char_start": 62, "char_end": 140, "chars": "const CString sSkin = sSkinName.Replace_n(\"/\", \"_\").Replace_n(\".\", \"_\");\n\n " } ] }
github.com/znc/znc/commit/a4a5aeeb17d32937d8c7d743dae9a4cc755ce773
src/WebModules.cpp
cwe-022
addKey
def addKey(client): """Adds a new key with the specified name and contents. Returns an error if a key with the specified name already exists. """ global BAD_REQUEST global CREATED validateClient(client) client_pub_key = loadClientRSAKey(client) token_data = decodeRequestToken(request.data, client_pub_key) validateNewKeyData(token_data) # Use 'x' flag so we can throw an error if a key with this name already exists try: with open('keys/%s/%s.key' % (client, token_data['name']), 'x') as f: f.write(token_data['key']) except FileExistsError: raise FoxlockError(BAD_REQUEST, "Key '%s' already exists" % token_data['name']) return 'Key successfully created', CREATED
def addKey(client): """Adds a new key with the specified name and contents. Returns an error if a key with the specified name already exists. """ global BAD_REQUEST global CREATED validateClient(client) client_pub_key = loadClientRSAKey(client) token_data = decodeRequestToken(request.data, client_pub_key) validateNewKeyData(token_data) validateKeyName(token_data['name']) # Use 'x' flag so we can throw an error if a key with this name already exists try: with open('keys/%s/%s.key' % (client, token_data['name']), 'x') as f: f.write(token_data['key']) except FileExistsError: raise FoxlockError(BAD_REQUEST, "Key '%s' already exists" % token_data['name']) return 'Key successfully created', CREATED
{ "deleted": [ { "line_no": 9, "char_start": 210, "char_end": 211, "line": "\n" } ], "added": [ { "line_no": 12, "char_start": 348, "char_end": 385, "line": "\tvalidateKeyName(token_data['name'])\n" } ] }
{ "deleted": [ { "char_start": 210, "char_end": 211, "chars": "\n" } ], "added": [ { "char_start": 346, "char_end": 383, "chars": ")\n\tvalidateKeyName(token_data['name']" } ] }
github.com/Mimickal/FoxLock/commit/7c665e556987f4e2c1a75e143a1e80ae066ad833
impl.py
cwe-022
dd_save_text
void dd_save_text(struct dump_dir *dd, const char *name, const char *data) { if (!dd->locked) error_msg_and_die("dump_dir is not opened"); /* bug */ char *full_path = concat_path_file(dd->dd_dirname, name); save_binary_file(full_path, data, strlen(data), dd->dd_uid, dd->dd_gid, dd->mode); free(full_path); }
void dd_save_text(struct dump_dir *dd, const char *name, const char *data) { if (!dd->locked) error_msg_and_die("dump_dir is not opened"); /* bug */ if (!str_is_correct_filename(name)) error_msg_and_die("Cannot save text. '%s' is not a valid file name", name); char *full_path = concat_path_file(dd->dd_dirname, name); save_binary_file(full_path, data, strlen(data), dd->dd_uid, dd->dd_gid, dd->mode); free(full_path); }
{ "deleted": [], "added": [ { "line_no": 6, "char_start": 162, "char_end": 202, "line": " if (!str_is_correct_filename(name))\n" }, { "line_no": 7, "char_start": 202, "char_end": 286, "line": " error_msg_and_die(\"Cannot save text. '%s' is not a valid file name\", name);\n" }, { "line_no": 8, "char_start": 286, "char_end": 287, "line": "\n" } ] }
{ "deleted": [], "added": [ { "char_start": 166, "char_end": 291, "chars": "if (!str_is_correct_filename(name))\n error_msg_and_die(\"Cannot save text. '%s' is not a valid file name\", name);\n\n " } ] }
github.com/abrt/libreport/commit/239c4f7d1f47265526b39ad70106767d00805277
src/lib/dump_dir.c
cwe-022
misc_file_checks
def misc_file_checks(self): print_header("MISC FILE CHECKS") # # Check for recommended and mandatory files # filenames = ("manifest.json", "LICENSE", "README.md", "scripts/install", "scripts/remove", "scripts/upgrade", "scripts/backup", "scripts/restore") non_mandatory = ("script/backup", "script/restore") for filename in filenames: if file_exists(self.path + "/" + filename): continue elif filename in non_mandatory: print_warning("Consider adding a file %s" % filename) else: print_error("File %s is mandatory" % filename) # # Deprecated php-fpm.ini thing # if file_exists(self.path + "/conf/php-fpm.ini"): print_warning( "Using a separate php-fpm.ini file is deprecated. " "Please merge your php-fpm directives directly in the pool file. " "(c.f. https://github.com/YunoHost-Apps/nextcloud_ynh/issues/138 )" ) # # Deprecated usage of 'add_header' in nginx conf # for filename in os.listdir(self.path + "/conf"): if not os.path.isfile(self.path + "/conf/" + filename): continue content = open(self.path + "/conf/" + filename).read() if "location" in content and "add_header" in content: print_warning( "Do not use 'add_header' in the nginx conf. Use 'more_set_headers' instead. " "(See https://www.peterbe.com/plog/be-very-careful-with-your-add_header-in-nginx " "and https://github.com/openresty/headers-more-nginx-module#more_set_headers )" )
def misc_file_checks(self): print_header("MISC FILE CHECKS") # # Check for recommended and mandatory files # filenames = ("manifest.json", "LICENSE", "README.md", "scripts/install", "scripts/remove", "scripts/upgrade", "scripts/backup", "scripts/restore") non_mandatory = ("script/backup", "script/restore") for filename in filenames: if file_exists(self.path + "/" + filename): continue elif filename in non_mandatory: print_warning("Consider adding a file %s" % filename) else: print_error("File %s is mandatory" % filename) # # Deprecated php-fpm.ini thing # if file_exists(self.path + "/conf/php-fpm.ini"): print_warning( "Using a separate php-fpm.ini file is deprecated. " "Please merge your php-fpm directives directly in the pool file. " "(c.f. https://github.com/YunoHost-Apps/nextcloud_ynh/issues/138 )" ) # # Analyze nginx conf # - Deprecated usage of 'add_header' in nginx conf # - Spot path traversal issue vulnerability # for filename in os.listdir(self.path + "/conf"): # Ignore subdirs or filename not containing nginx in the name if not os.path.isfile(self.path + "/conf/" + filename) or "nginx" not in filename: continue # # 'add_header' usage # content = open(self.path + "/conf/" + filename).read() if "location" in content and "add_header" in content: print_warning( "Do not use 'add_header' in the nginx conf. Use 'more_set_headers' instead. 
" "(See https://www.peterbe.com/plog/be-very-careful-with-your-add_header-in-nginx " "and https://github.com/openresty/headers-more-nginx-module#more_set_headers )" ) # # Path traversal issues # lines = open(self.path + "/conf/" + filename).readlines() lines = [line.strip() for line in lines if not line.strip().startswith("#")] # Let's find the first location line location_line = None path_traversal_vulnerable = False lines_iter = lines.__iter__() for line in lines_iter: if line.startswith("location"): location_line = line break # Look at the next lines for an 'alias' directive if location_line is not None: for line in lines_iter: if line.startswith("location"): # Entering a new location block ... abort here # and assume there's no alias block later... break if line.startswith("alias"): # We should definitely check for path traversal issue # Does the location target ends with / ? target = location_line.split()[-2] if not target.endswith("/"): path_traversal_vulnerable = True break if path_traversal_vulnerable: print_warning( "The nginx configuration appears vulnerable to path traversal as explained in " "https://www.acunetix.com/vulnerabilities/web/path-traversal-via-misconfigured-nginx-alias/\n" "To fix it, look at the first lines of the nginx conf of the example app : " "https://github.com/YunoHost/example_ynh/blob/master/conf/nginx.conf" )
{ "deleted": [ { "line_no": 39, "char_start": 1268, "char_end": 1336, "line": " if not os.path.isfile(self.path + \"/conf/\" + filename):\n" } ], "added": [ { "line_no": 42, "char_start": 1425, "char_end": 1520, "line": " if not os.path.isfile(self.path + \"/conf/\" + filename) or \"nginx\" not in filename:\n" }, { "line_no": 44, "char_start": 1545, "char_end": 1546, "line": "\n" } ] }
{ "deleted": [], "added": [ { "char_start": 1153, "char_end": 1184, "chars": "Analyze nginx conf\n # - " }, { "char_start": 1240, "char_end": 1292, "chars": " - Spot path traversal issue vulnerability\n #" }, { "char_start": 1363, "char_end": 1437, "chars": "# Ignore subdirs or filename not containing nginx in the name\n " }, { "char_start": 1491, "char_end": 1518, "chars": " or \"nginx\" not in filename" }, { "char_start": 1544, "char_end": 1606, "chars": "\n\n #\n # 'add_header' usage\n #" }, { "char_start": 2089, "char_end": 3888, "chars": "\n\n #\n # Path traversal issues\n #\n lines = open(self.path + \"/conf/\" + filename).readlines()\n lines = [line.strip() for line in lines if not line.strip().startswith(\"#\")]\n # Let's find the first location line\n location_line = None\n path_traversal_vulnerable = False\n lines_iter = lines.__iter__()\n for line in lines_iter:\n if line.startswith(\"location\"):\n location_line = line\n break\n # Look at the next lines for an 'alias' directive\n if location_line is not None:\n for line in lines_iter:\n if line.startswith(\"location\"):\n # Entering a new location block ... abort here\n # and assume there's no alias block later...\n break\n if line.startswith(\"alias\"):\n # We should definitely check for path traversal issue\n # Does the location target ends with / ?\n target = location_line.split()[-2]\n if not target.endswith(\"/\"):\n path_traversal_vulnerable = True\n break\n if path_traversal_vulnerable:\n print_warning(\n \"The nginx configuration appears vulnerable to path traversal as explained in \"\n \"https://www.acunetix.com/vulnerabilities/web/path-traversal-via-misconfigured-nginx-alias/\\n\"\n \"To fix it, look at the first lines of the nginx conf of the example app : \"\n \"https://github.com/YunoHost/example_ynh/blob/master/conf/nginx.conf\"\n )" } ] }
github.com/YunoHost/package_linter/commit/f6e98894cfe841aedaa7efd590937f0255193913
package_linter.py
cwe-022
dd_save_binary
void dd_save_binary(struct dump_dir* dd, const char* name, const char* data, unsigned size) { if (!dd->locked) error_msg_and_die("dump_dir is not opened"); /* bug */ char *full_path = concat_path_file(dd->dd_dirname, name); save_binary_file(full_path, data, size, dd->dd_uid, dd->dd_gid, dd->mode); free(full_path); }
void dd_save_binary(struct dump_dir* dd, const char* name, const char* data, unsigned size) { if (!dd->locked) error_msg_and_die("dump_dir is not opened"); /* bug */ if (!str_is_correct_filename(name)) error_msg_and_die("Cannot save binary. '%s' is not a valid file name", name); char *full_path = concat_path_file(dd->dd_dirname, name); save_binary_file(full_path, data, size, dd->dd_uid, dd->dd_gid, dd->mode); free(full_path); }
{ "deleted": [], "added": [ { "line_no": 6, "char_start": 179, "char_end": 219, "line": " if (!str_is_correct_filename(name))\n" }, { "line_no": 7, "char_start": 219, "char_end": 305, "line": " error_msg_and_die(\"Cannot save binary. '%s' is not a valid file name\", name);\n" }, { "line_no": 8, "char_start": 305, "char_end": 306, "line": "\n" } ] }
{ "deleted": [], "added": [ { "char_start": 183, "char_end": 310, "chars": "if (!str_is_correct_filename(name))\n error_msg_and_die(\"Cannot save binary. '%s' is not a valid file name\", name);\n\n " } ] }
github.com/abrt/libreport/commit/239c4f7d1f47265526b39ad70106767d00805277
src/lib/dump_dir.c
cwe-022
pascal_case
def pascal_case(value: str) -> str: return stringcase.pascalcase(value)
def pascal_case(value: str) -> str: return stringcase.pascalcase(_sanitize(value))
{ "deleted": [ { "line_no": 2, "char_start": 36, "char_end": 75, "line": " return stringcase.pascalcase(value)\n" } ], "added": [ { "line_no": 2, "char_start": 36, "char_end": 86, "line": " return stringcase.pascalcase(_sanitize(value))\n" } ] }
{ "deleted": [], "added": [ { "char_start": 69, "char_end": 79, "chars": "_sanitize(" }, { "char_start": 85, "char_end": 86, "chars": ")" } ] }
github.com/openapi-generators/openapi-python-client/commit/3e7dfae5d0b3685abf1ede1bc6c086a116ac4746
openapi_python_client/utils.py
cwe-022
cleanup_pathname
cleanup_pathname(struct archive_write_disk *a) { char *dest, *src; char separator = '\0'; dest = src = a->name; if (*src == '\0') { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid empty pathname"); return (ARCHIVE_FAILED); } #if defined(__CYGWIN__) cleanup_pathname_win(a); #endif /* Skip leading '/'. */ if (*src == '/') separator = *src++; /* Scan the pathname one element at a time. */ for (;;) { /* src points to first char after '/' */ if (src[0] == '\0') { break; } else if (src[0] == '/') { /* Found '//', ignore second one. */ src++; continue; } else if (src[0] == '.') { if (src[1] == '\0') { /* Ignore trailing '.' */ break; } else if (src[1] == '/') { /* Skip './'. */ src += 2; continue; } else if (src[1] == '.') { if (src[2] == '/' || src[2] == '\0') { /* Conditionally warn about '..' */ if (a->flags & ARCHIVE_EXTRACT_SECURE_NODOTDOT) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Path contains '..'"); return (ARCHIVE_FAILED); } } /* * Note: Under no circumstances do we * remove '..' elements. In * particular, restoring * '/foo/../bar/' should create the * 'foo' dir as a side-effect. */ } } /* Copy current element, including leading '/'. */ if (separator) *dest++ = '/'; while (*src != '\0' && *src != '/') { *dest++ = *src++; } if (*src == '\0') break; /* Skip '/' separator. */ separator = *src++; } /* * We've just copied zero or more path elements, not including the * final '/'. */ if (dest == a->name) { /* * Nothing got copied. The path must have been something * like '.' or '/' or './' or '/././././/./'. */ if (separator) *dest++ = '/'; else *dest++ = '.'; } /* Terminate the result. */ *dest = '\0'; return (ARCHIVE_OK); }
cleanup_pathname(struct archive_write_disk *a) { char *dest, *src; char separator = '\0'; dest = src = a->name; if (*src == '\0') { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid empty pathname"); return (ARCHIVE_FAILED); } #if defined(__CYGWIN__) cleanup_pathname_win(a); #endif /* Skip leading '/'. */ if (*src == '/') { if (a->flags & ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Path is absolute"); return (ARCHIVE_FAILED); } separator = *src++; } /* Scan the pathname one element at a time. */ for (;;) { /* src points to first char after '/' */ if (src[0] == '\0') { break; } else if (src[0] == '/') { /* Found '//', ignore second one. */ src++; continue; } else if (src[0] == '.') { if (src[1] == '\0') { /* Ignore trailing '.' */ break; } else if (src[1] == '/') { /* Skip './'. */ src += 2; continue; } else if (src[1] == '.') { if (src[2] == '/' || src[2] == '\0') { /* Conditionally warn about '..' */ if (a->flags & ARCHIVE_EXTRACT_SECURE_NODOTDOT) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Path contains '..'"); return (ARCHIVE_FAILED); } } /* * Note: Under no circumstances do we * remove '..' elements. In * particular, restoring * '/foo/../bar/' should create the * 'foo' dir as a side-effect. */ } } /* Copy current element, including leading '/'. */ if (separator) *dest++ = '/'; while (*src != '\0' && *src != '/') { *dest++ = *src++; } if (*src == '\0') break; /* Skip '/' separator. */ separator = *src++; } /* * We've just copied zero or more path elements, not including the * final '/'. */ if (dest == a->name) { /* * Nothing got copied. The path must have been something * like '.' or '/' or './' or '/././././/./'. */ if (separator) *dest++ = '/'; else *dest++ = '.'; } /* Terminate the result. */ *dest = '\0'; return (ARCHIVE_OK); }
{ "deleted": [ { "line_no": 17, "char_start": 336, "char_end": 354, "line": "\tif (*src == '/')\n" } ], "added": [ { "line_no": 17, "char_start": 336, "char_end": 356, "line": "\tif (*src == '/') {\n" }, { "line_no": 18, "char_start": 356, "char_end": 415, "line": "\t\tif (a->flags & ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS) {\n" }, { "line_no": 19, "char_start": 415, "char_end": 469, "line": "\t\t\tarchive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,\n" }, { "line_no": 20, "char_start": 469, "char_end": 511, "line": "\t\t\t \"Path is absolute\");\n" }, { "line_no": 21, "char_start": 511, "char_end": 539, "line": "\t\t\treturn (ARCHIVE_FAILED);\n" }, { "line_no": 22, "char_start": 539, "char_end": 543, "line": "\t\t}\n" }, { "line_no": 23, "char_start": 543, "char_end": 544, "line": "\n" }, { "line_no": 25, "char_start": 566, "char_end": 569, "line": "\t}\n" } ] }
{ "deleted": [], "added": [ { "char_start": 353, "char_end": 543, "chars": " {\n\t\tif (a->flags & ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS) {\n\t\t\tarchive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,\n\t\t\t \"Path is absolute\");\n\t\t\treturn (ARCHIVE_FAILED);\n\t\t}\n" }, { "char_start": 565, "char_end": 568, "chars": "\n\t}" } ] }
github.com/libarchive/libarchive/commit/59357157706d47c365b2227739e17daba3607526
libarchive/archive_write_disk_posix.c
cwe-022
create_dump_dir_from_problem_data
struct dump_dir *create_dump_dir_from_problem_data(problem_data_t *problem_data, const char *base_dir_name) { INITIALIZE_LIBREPORT(); char *type = problem_data_get_content_or_NULL(problem_data, FILENAME_ANALYZER); if (!type) { error_msg(_("Missing required item: '%s'"), FILENAME_ANALYZER); return NULL; } uid_t uid = (uid_t)-1L; char *uid_str = problem_data_get_content_or_NULL(problem_data, FILENAME_UID); if (uid_str) { char *endptr; errno = 0; long val = strtol(uid_str, &endptr, 10); if (errno != 0 || endptr == uid_str || *endptr != '\0' || INT_MAX < val) { error_msg(_("uid value is not valid: '%s'"), uid_str); return NULL; } uid = (uid_t)val; } struct timeval tv; if (gettimeofday(&tv, NULL) < 0) { perror_msg("gettimeofday()"); return NULL; } char *problem_id = xasprintf("%s-%s.%ld-%lu"NEW_PD_SUFFIX, type, iso_date_string(&(tv.tv_sec)), (long)tv.tv_usec, (long)getpid()); log_info("Saving to %s/%s with uid %d", base_dir_name, problem_id, uid); struct dump_dir *dd; if (base_dir_name) dd = try_dd_create(base_dir_name, problem_id, uid); else { /* Try /var/run/abrt */ dd = try_dd_create(LOCALSTATEDIR"/run/abrt", problem_id, uid); /* Try $HOME/tmp */ if (!dd) { char *home = getenv("HOME"); if (home && home[0]) { home = concat_path_file(home, "tmp"); /*mkdir(home, 0777); - do we want this? */ dd = try_dd_create(home, problem_id, uid); free(home); } } //TODO: try user's home dir obtained by getpwuid(getuid())? 
/* Try system temporary directory */ if (!dd) dd = try_dd_create(LARGE_DATA_TMP_DIR, problem_id, uid); } if (!dd) /* try_dd_create() already emitted the error message */ goto ret; GHashTableIter iter; char *name; struct problem_item *value; g_hash_table_iter_init(&iter, problem_data); while (g_hash_table_iter_next(&iter, (void**)&name, (void**)&value)) { if (value->flags & CD_FLAG_BIN) { char *dest = concat_path_file(dd->dd_dirname, name); log_info("copying '%s' to '%s'", value->content, dest); off_t copied = copy_file(value->content, dest, DEFAULT_DUMP_DIR_MODE | S_IROTH); if (copied < 0) error_msg("Can't copy %s to %s", value->content, dest); else log_info("copied %li bytes", (unsigned long)copied); free(dest); continue; } /* only files should contain '/' and those are handled earlier */ if (name[0] == '.' || strchr(name, '/')) { error_msg("Problem data field name contains disallowed chars: '%s'", name); continue; } dd_save_text(dd, name, value->content); } /* need to create basic files AFTER we save the pd to dump_dir * otherwise we can't skip already created files like in case when * reporting from anaconda where we can't read /etc/{system,redhat}-release * and os_release is taken from anaconda */ dd_create_basic_files(dd, uid, NULL); problem_id[strlen(problem_id) - strlen(NEW_PD_SUFFIX)] = '\0'; char* new_path = concat_path_file(base_dir_name, problem_id); log_info("Renaming from '%s' to '%s'", dd->dd_dirname, new_path); dd_rename(dd, new_path); ret: free(problem_id); return dd; }
struct dump_dir *create_dump_dir_from_problem_data(problem_data_t *problem_data, const char *base_dir_name) { INITIALIZE_LIBREPORT(); char *type = problem_data_get_content_or_NULL(problem_data, FILENAME_ANALYZER); if (!type) { error_msg(_("Missing required item: '%s'"), FILENAME_ANALYZER); return NULL; } if (!str_is_correct_filename(type)) { error_msg(_("'%s' is not correct file name"), FILENAME_ANALYZER); return NULL; } uid_t uid = (uid_t)-1L; char *uid_str = problem_data_get_content_or_NULL(problem_data, FILENAME_UID); if (uid_str) { char *endptr; errno = 0; long val = strtol(uid_str, &endptr, 10); if (errno != 0 || endptr == uid_str || *endptr != '\0' || INT_MAX < val) { error_msg(_("uid value is not valid: '%s'"), uid_str); return NULL; } uid = (uid_t)val; } struct timeval tv; if (gettimeofday(&tv, NULL) < 0) { perror_msg("gettimeofday()"); return NULL; } char *problem_id = xasprintf("%s-%s.%ld-%lu"NEW_PD_SUFFIX, type, iso_date_string(&(tv.tv_sec)), (long)tv.tv_usec, (long)getpid()); log_info("Saving to %s/%s with uid %d", base_dir_name, problem_id, uid); struct dump_dir *dd; if (base_dir_name) dd = try_dd_create(base_dir_name, problem_id, uid); else { /* Try /var/run/abrt */ dd = try_dd_create(LOCALSTATEDIR"/run/abrt", problem_id, uid); /* Try $HOME/tmp */ if (!dd) { char *home = getenv("HOME"); if (home && home[0]) { home = concat_path_file(home, "tmp"); /*mkdir(home, 0777); - do we want this? */ dd = try_dd_create(home, problem_id, uid); free(home); } } //TODO: try user's home dir obtained by getpwuid(getuid())? 
/* Try system temporary directory */ if (!dd) dd = try_dd_create(LARGE_DATA_TMP_DIR, problem_id, uid); } if (!dd) /* try_dd_create() already emitted the error message */ goto ret; GHashTableIter iter; char *name; struct problem_item *value; g_hash_table_iter_init(&iter, problem_data); while (g_hash_table_iter_next(&iter, (void**)&name, (void**)&value)) { if (!str_is_correct_filename(name)) { error_msg("Problem data field name contains disallowed chars: '%s'", name); continue; } if (value->flags & CD_FLAG_BIN) { char *dest = concat_path_file(dd->dd_dirname, name); log_info("copying '%s' to '%s'", value->content, dest); off_t copied = copy_file(value->content, dest, DEFAULT_DUMP_DIR_MODE | S_IROTH); if (copied < 0) error_msg("Can't copy %s to %s", value->content, dest); else log_info("copied %li bytes", (unsigned long)copied); free(dest); continue; } dd_save_text(dd, name, value->content); } /* need to create basic files AFTER we save the pd to dump_dir * otherwise we can't skip already created files like in case when * reporting from anaconda where we can't read /etc/{system,redhat}-release * and os_release is taken from anaconda */ dd_create_basic_files(dd, uid, NULL); problem_id[strlen(problem_id) - strlen(NEW_PD_SUFFIX)] = '\0'; char* new_path = concat_path_file(base_dir_name, problem_id); log_info("Renaming from '%s' to '%s'", dd->dd_dirname, new_path); dd_rename(dd, new_path); ret: free(problem_id); return dd; }
{ "deleted": [ { "line_no": 90, "char_start": 2743, "char_end": 2817, "line": " /* only files should contain '/' and those are handled earlier */\n" }, { "line_no": 91, "char_start": 2817, "char_end": 2866, "line": " if (name[0] == '.' || strchr(name, '/'))\n" }, { "line_no": 92, "char_start": 2866, "char_end": 2876, "line": " {\n" }, { "line_no": 93, "char_start": 2876, "char_end": 2964, "line": " error_msg(\"Problem data field name contains disallowed chars: '%s'\", name);\n" }, { "line_no": 94, "char_start": 2964, "char_end": 2986, "line": " continue;\n" }, { "line_no": 95, "char_start": 2986, "char_end": 2996, "line": " }\n" }, { "line_no": 96, "char_start": 2996, "char_end": 2997, "line": "\n" } ], "added": [ { "line_no": 13, "char_start": 345, "char_end": 385, "line": " if (!str_is_correct_filename(type))\n" }, { "line_no": 14, "char_start": 385, "char_end": 391, "line": " {\n" }, { "line_no": 15, "char_start": 391, "char_end": 465, "line": " error_msg(_(\"'%s' is not correct file name\"), FILENAME_ANALYZER);\n" }, { "line_no": 16, "char_start": 465, "char_end": 486, "line": " return NULL;\n" }, { "line_no": 17, "char_start": 486, "char_end": 492, "line": " }\n" }, { "line_no": 18, "char_start": 492, "char_end": 493, "line": "\n" }, { "line_no": 82, "char_start": 2371, "char_end": 2415, "line": " if (!str_is_correct_filename(name))\n" }, { "line_no": 83, "char_start": 2415, "char_end": 2425, "line": " {\n" }, { "line_no": 84, "char_start": 2425, "char_end": 2513, "line": " error_msg(\"Problem data field name contains disallowed chars: '%s'\", name);\n" }, { "line_no": 85, "char_start": 2513, "char_end": 2535, "line": " continue;\n" }, { "line_no": 86, "char_start": 2535, "char_end": 2545, "line": " }\n" }, { "line_no": 87, "char_start": 2545, "char_end": 2546, "line": "\n" } ] }
{ "deleted": [ { "char_start": 2709, "char_end": 2963, "chars": "\n continue;\n }\n\n /* only files should contain '/' and those are handled earlier */\n if (name[0] == '.' || strchr(name, '/'))\n {\n error_msg(\"Problem data field name contains disallowed chars: '%s'\", name);" } ], "added": [ { "char_start": 349, "char_end": 497, "chars": "if (!str_is_correct_filename(type))\n {\n error_msg(_(\"'%s' is not correct file name\"), FILENAME_ANALYZER);\n return NULL;\n }\n\n " }, { "char_start": 2383, "char_end": 2558, "chars": "!str_is_correct_filename(name))\n {\n error_msg(\"Problem data field name contains disallowed chars: '%s'\", name);\n continue;\n }\n\n if (" } ] }
github.com/abrt/libreport/commit/239c4f7d1f47265526b39ad70106767d00805277
src/lib/create_dump_dir.c
cwe-022
list
def list(self, keyfilter='/'): path = os.path.join(self.namespace, keyfilter) if path != '/': path = path.rstrip('/') try: result = self.etcd.read(path, recursive=True) except etcd.EtcdKeyNotFound: return None except etcd.EtcdException as err: log_error("Error listing %s: [%r]" % (keyfilter, repr(err))) raise CSStoreError('Error occurred while trying to list keys') value = set() for entry in result.get_subtree(): if entry.key == path: continue name = entry.key[len(path):] if entry.dir and not name.endswith('/'): name += '/' value.add(name.lstrip('/')) return sorted(value)
def list(self, keyfilter='/'): path = self._absolute_key(keyfilter) if path != '/': path = path.rstrip('/') try: result = self.etcd.read(path, recursive=True) except etcd.EtcdKeyNotFound: return None except etcd.EtcdException as err: log_error("Error listing %s: [%r]" % (keyfilter, repr(err))) raise CSStoreError('Error occurred while trying to list keys') value = set() for entry in result.get_subtree(): if entry.key == path: continue name = entry.key[len(path):] if entry.dir and not name.endswith('/'): name += '/' value.add(name.lstrip('/')) return sorted(value)
{ "deleted": [ { "line_no": 2, "char_start": 35, "char_end": 90, "line": " path = os.path.join(self.namespace, keyfilter)\n" } ], "added": [ { "line_no": 2, "char_start": 35, "char_end": 80, "line": " path = self._absolute_key(keyfilter)\n" } ] }
{ "deleted": [ { "char_start": 50, "char_end": 63, "chars": "os.path.join(" }, { "char_start": 68, "char_end": 69, "chars": "n" }, { "char_start": 70, "char_end": 72, "chars": "me" }, { "char_start": 73, "char_end": 76, "chars": "pac" }, { "char_start": 77, "char_end": 79, "chars": ", " } ], "added": [ { "char_start": 55, "char_end": 56, "chars": "_" }, { "char_start": 57, "char_end": 63, "chars": "bsolut" }, { "char_start": 64, "char_end": 66, "chars": "_k" }, { "char_start": 67, "char_end": 69, "chars": "y(" } ] }
github.com/latchset/custodia/commit/785fc87f38b4811bc4ce43a0a9b2267ee7d500b4
custodia/store/etcdstore.py
cwe-022
process
local void process(char *path) { int method = -1; /* get_header() return value */ size_t len; /* length of base name (minus suffix) */ struct stat st; /* to get file type and mod time */ /* all compressed suffixes for decoding search, in length order */ static char *sufs[] = {".z", "-z", "_z", ".Z", ".gz", "-gz", ".zz", "-zz", ".zip", ".ZIP", ".tgz", NULL}; /* open input file with name in, descriptor ind -- set name and mtime */ if (path == NULL) { strcpy(g.inf, "<stdin>"); g.ind = 0; g.name = NULL; g.mtime = g.headis & 2 ? (fstat(g.ind, &st) ? time(NULL) : st.st_mtime) : 0; len = 0; } else { /* set input file name (already set if recursed here) */ if (path != g.inf) { strncpy(g.inf, path, sizeof(g.inf)); if (g.inf[sizeof(g.inf) - 1]) bail("name too long: ", path); } len = strlen(g.inf); /* try to stat input file -- if not there and decoding, look for that name with compressed suffixes */ if (lstat(g.inf, &st)) { if (errno == ENOENT && (g.list || g.decode)) { char **try = sufs; do { if (*try == NULL || len + strlen(*try) >= sizeof(g.inf)) break; strcpy(g.inf + len, *try++); errno = 0; } while (lstat(g.inf, &st) && errno == ENOENT); } #ifdef EOVERFLOW if (errno == EOVERFLOW || errno == EFBIG) bail(g.inf, " too large -- not compiled with large file support"); #endif if (errno) { g.inf[len] = 0; complain("%s does not exist -- skipping", g.inf); return; } len = strlen(g.inf); } /* only process regular files, but allow symbolic links if -f, recurse into directory if -r */ if ((st.st_mode & S_IFMT) != S_IFREG && (st.st_mode & S_IFMT) != S_IFLNK && (st.st_mode & S_IFMT) != S_IFDIR) { complain("%s is a special file or device -- skipping", g.inf); return; } if ((st.st_mode & S_IFMT) == S_IFLNK && !g.force && !g.pipeout) { complain("%s is a symbolic link -- skipping", g.inf); return; } if ((st.st_mode & S_IFMT) == S_IFDIR && !g.recurse) { complain("%s is a directory -- skipping", g.inf); return; } /* recurse into directory (assumes Unix) */ if ((st.st_mode & S_IFMT) == 
S_IFDIR) { char *roll, *item, *cut, *base, *bigger; size_t len, hold; DIR *here; struct dirent *next; /* accumulate list of entries (need to do this, since readdir() behavior not defined if directory modified between calls) */ here = opendir(g.inf); if (here == NULL) return; hold = 512; roll = MALLOC(hold); if (roll == NULL) bail("not enough memory", ""); *roll = 0; item = roll; while ((next = readdir(here)) != NULL) { if (next->d_name[0] == 0 || (next->d_name[0] == '.' && (next->d_name[1] == 0 || (next->d_name[1] == '.' && next->d_name[2] == 0)))) continue; len = strlen(next->d_name) + 1; if (item + len + 1 > roll + hold) { do { /* make roll bigger */ hold <<= 1; } while (item + len + 1 > roll + hold); bigger = REALLOC(roll, hold); if (bigger == NULL) { FREE(roll); bail("not enough memory", ""); } item = bigger + (item - roll); roll = bigger; } strcpy(item, next->d_name); item += len; *item = 0; } closedir(here); /* run process() for each entry in the directory */ cut = base = g.inf + strlen(g.inf); if (base > g.inf && base[-1] != (unsigned char)'/') { if ((size_t)(base - g.inf) >= sizeof(g.inf)) bail("path too long", g.inf); *base++ = '/'; } item = roll; while (*item) { strncpy(base, item, sizeof(g.inf) - (base - g.inf)); if (g.inf[sizeof(g.inf) - 1]) { strcpy(g.inf + (sizeof(g.inf) - 4), "..."); bail("path too long: ", g.inf); } process(g.inf); item += strlen(item) + 1; } *cut = 0; /* release list of entries */ FREE(roll); return; } /* don't compress .gz (or provided suffix) files, unless -f */ if (!(g.force || g.list || g.decode) && len >= strlen(g.sufx) && strcmp(g.inf + len - strlen(g.sufx), g.sufx) == 0) { complain("%s ends with %s -- skipping", g.inf, g.sufx); return; } /* create output file only if input file has compressed suffix */ if (g.decode == 1 && !g.pipeout && !g.list) { int suf = compressed_suffix(g.inf); if (suf == 0) { complain("%s does not have compressed suffix -- skipping", g.inf); return; } len -= suf; } /* open input file */ g.ind = 
open(g.inf, O_RDONLY, 0); if (g.ind < 0) bail("read error on ", g.inf); /* prepare gzip header information for compression */ g.name = g.headis & 1 ? justname(g.inf) : NULL; g.mtime = g.headis & 2 ? st.st_mtime : 0; } SET_BINARY_MODE(g.ind); /* if decoding or testing, try to read gzip header */ g.hname = NULL; if (g.decode) { in_init(); method = get_header(1); if (method != 8 && method != 257 && /* gzip -cdf acts like cat on uncompressed input */ !(method == -2 && g.force && g.pipeout && g.decode != 2 && !g.list)) { RELEASE(g.hname); if (g.ind != 0) close(g.ind); if (method != -1) complain(method < 0 ? "%s is not compressed -- skipping" : "%s has unknown compression method -- skipping", g.inf); return; } /* if requested, test input file (possibly a special list) */ if (g.decode == 2) { if (method == 8) infchk(); else { unlzw(); if (g.list) { g.in_tot -= 3; show_info(method, 0, g.out_tot, 0); } } RELEASE(g.hname); if (g.ind != 0) close(g.ind); return; } } /* if requested, just list information about input file */ if (g.list) { list_info(); RELEASE(g.hname); if (g.ind != 0) close(g.ind); return; } /* create output file out, descriptor outd */ if (path == NULL || g.pipeout) { /* write to stdout */ g.outf = MALLOC(strlen("<stdout>") + 1); if (g.outf == NULL) bail("not enough memory", ""); strcpy(g.outf, "<stdout>"); g.outd = 1; if (!g.decode && !g.force && isatty(g.outd)) bail("trying to write compressed data to a terminal", " (use -f to force)"); } else { char *to, *repl; /* use header name for output when decompressing with -N */ to = g.inf; if (g.decode && (g.headis & 1) != 0 && g.hname != NULL) { to = g.hname; len = strlen(g.hname); } /* replace .tgz with .tar when decoding */ repl = g.decode && strcmp(to + len, ".tgz") ? "" : ".tar"; /* create output file and open to write */ g.outf = MALLOC(len + (g.decode ? strlen(repl) : strlen(g.sufx)) + 1); if (g.outf == NULL) bail("not enough memory", ""); memcpy(g.outf, to, len); strcpy(g.outf + len, g.decode ? 
repl : g.sufx); g.outd = open(g.outf, O_CREAT | O_TRUNC | O_WRONLY | (g.force ? 0 : O_EXCL), 0600); /* if exists and not -f, give user a chance to overwrite */ if (g.outd < 0 && errno == EEXIST && isatty(0) && g.verbosity) { int ch, reply; fprintf(stderr, "%s exists -- overwrite (y/n)? ", g.outf); fflush(stderr); reply = -1; do { ch = getchar(); if (reply < 0 && ch != ' ' && ch != '\t') reply = ch == 'y' || ch == 'Y' ? 1 : 0; } while (ch != EOF && ch != '\n' && ch != '\r'); if (reply == 1) g.outd = open(g.outf, O_CREAT | O_TRUNC | O_WRONLY, 0600); } /* if exists and no overwrite, report and go on to next */ if (g.outd < 0 && errno == EEXIST) { complain("%s exists -- skipping", g.outf); RELEASE(g.outf); RELEASE(g.hname); if (g.ind != 0) close(g.ind); return; } /* if some other error, give up */ if (g.outd < 0) bail("write error on ", g.outf); } SET_BINARY_MODE(g.outd); RELEASE(g.hname); /* process ind to outd */ if (g.verbosity > 1) fprintf(stderr, "%s to %s ", g.inf, g.outf); if (g.decode) { if (method == 8) infchk(); else if (method == 257) unlzw(); else cat(); } #ifndef NOTHREAD else if (g.procs > 1) parallel_compress(); #endif else single_compress(0); if (g.verbosity > 1) { putc('\n', stderr); fflush(stderr); } /* finish up, copy attributes, set times, delete original */ if (g.ind != 0) close(g.ind); if (g.outd != 1) { if (close(g.outd)) bail("write error on ", g.outf); g.outd = -1; /* now prevent deletion on interrupt */ if (g.ind != 0) { copymeta(g.inf, g.outf); if (!g.keep) unlink(g.inf); } if (g.decode && (g.headis & 2) != 0 && g.stamp) touch(g.outf, g.stamp); } RELEASE(g.outf); }
local void process(char *path) { int method = -1; /* get_header() return value */ size_t len; /* length of base name (minus suffix) */ struct stat st; /* to get file type and mod time */ /* all compressed suffixes for decoding search, in length order */ static char *sufs[] = {".z", "-z", "_z", ".Z", ".gz", "-gz", ".zz", "-zz", ".zip", ".ZIP", ".tgz", NULL}; /* open input file with name in, descriptor ind -- set name and mtime */ if (path == NULL) { strcpy(g.inf, "<stdin>"); g.ind = 0; g.name = NULL; g.mtime = g.headis & 2 ? (fstat(g.ind, &st) ? time(NULL) : st.st_mtime) : 0; len = 0; } else { /* set input file name (already set if recursed here) */ if (path != g.inf) { strncpy(g.inf, path, sizeof(g.inf)); if (g.inf[sizeof(g.inf) - 1]) bail("name too long: ", path); } len = strlen(g.inf); /* try to stat input file -- if not there and decoding, look for that name with compressed suffixes */ if (lstat(g.inf, &st)) { if (errno == ENOENT && (g.list || g.decode)) { char **try = sufs; do { if (*try == NULL || len + strlen(*try) >= sizeof(g.inf)) break; strcpy(g.inf + len, *try++); errno = 0; } while (lstat(g.inf, &st) && errno == ENOENT); } #ifdef EOVERFLOW if (errno == EOVERFLOW || errno == EFBIG) bail(g.inf, " too large -- not compiled with large file support"); #endif if (errno) { g.inf[len] = 0; complain("%s does not exist -- skipping", g.inf); return; } len = strlen(g.inf); } /* only process regular files, but allow symbolic links if -f, recurse into directory if -r */ if ((st.st_mode & S_IFMT) != S_IFREG && (st.st_mode & S_IFMT) != S_IFLNK && (st.st_mode & S_IFMT) != S_IFDIR) { complain("%s is a special file or device -- skipping", g.inf); return; } if ((st.st_mode & S_IFMT) == S_IFLNK && !g.force && !g.pipeout) { complain("%s is a symbolic link -- skipping", g.inf); return; } if ((st.st_mode & S_IFMT) == S_IFDIR && !g.recurse) { complain("%s is a directory -- skipping", g.inf); return; } /* recurse into directory (assumes Unix) */ if ((st.st_mode & S_IFMT) == 
S_IFDIR) { char *roll, *item, *cut, *base, *bigger; size_t len, hold; DIR *here; struct dirent *next; /* accumulate list of entries (need to do this, since readdir() behavior not defined if directory modified between calls) */ here = opendir(g.inf); if (here == NULL) return; hold = 512; roll = MALLOC(hold); if (roll == NULL) bail("not enough memory", ""); *roll = 0; item = roll; while ((next = readdir(here)) != NULL) { if (next->d_name[0] == 0 || (next->d_name[0] == '.' && (next->d_name[1] == 0 || (next->d_name[1] == '.' && next->d_name[2] == 0)))) continue; len = strlen(next->d_name) + 1; if (item + len + 1 > roll + hold) { do { /* make roll bigger */ hold <<= 1; } while (item + len + 1 > roll + hold); bigger = REALLOC(roll, hold); if (bigger == NULL) { FREE(roll); bail("not enough memory", ""); } item = bigger + (item - roll); roll = bigger; } strcpy(item, next->d_name); item += len; *item = 0; } closedir(here); /* run process() for each entry in the directory */ cut = base = g.inf + strlen(g.inf); if (base > g.inf && base[-1] != (unsigned char)'/') { if ((size_t)(base - g.inf) >= sizeof(g.inf)) bail("path too long", g.inf); *base++ = '/'; } item = roll; while (*item) { strncpy(base, item, sizeof(g.inf) - (base - g.inf)); if (g.inf[sizeof(g.inf) - 1]) { strcpy(g.inf + (sizeof(g.inf) - 4), "..."); bail("path too long: ", g.inf); } process(g.inf); item += strlen(item) + 1; } *cut = 0; /* release list of entries */ FREE(roll); return; } /* don't compress .gz (or provided suffix) files, unless -f */ if (!(g.force || g.list || g.decode) && len >= strlen(g.sufx) && strcmp(g.inf + len - strlen(g.sufx), g.sufx) == 0) { complain("%s ends with %s -- skipping", g.inf, g.sufx); return; } /* create output file only if input file has compressed suffix */ if (g.decode == 1 && !g.pipeout && !g.list) { int suf = compressed_suffix(g.inf); if (suf == 0) { complain("%s does not have compressed suffix -- skipping", g.inf); return; } len -= suf; } /* open input file */ g.ind = 
open(g.inf, O_RDONLY, 0); if (g.ind < 0) bail("read error on ", g.inf); /* prepare gzip header information for compression */ g.name = g.headis & 1 ? justname(g.inf) : NULL; g.mtime = g.headis & 2 ? st.st_mtime : 0; } SET_BINARY_MODE(g.ind); /* if decoding or testing, try to read gzip header */ g.hname = NULL; if (g.decode) { in_init(); method = get_header(1); if (method != 8 && method != 257 && /* gzip -cdf acts like cat on uncompressed input */ !(method == -2 && g.force && g.pipeout && g.decode != 2 && !g.list)) { RELEASE(g.hname); if (g.ind != 0) close(g.ind); if (method != -1) complain(method < 0 ? "%s is not compressed -- skipping" : "%s has unknown compression method -- skipping", g.inf); return; } /* if requested, test input file (possibly a special list) */ if (g.decode == 2) { if (method == 8) infchk(); else { unlzw(); if (g.list) { g.in_tot -= 3; show_info(method, 0, g.out_tot, 0); } } RELEASE(g.hname); if (g.ind != 0) close(g.ind); return; } } /* if requested, just list information about input file */ if (g.list) { list_info(); RELEASE(g.hname); if (g.ind != 0) close(g.ind); return; } /* create output file out, descriptor outd */ if (path == NULL || g.pipeout) { /* write to stdout */ g.outf = MALLOC(strlen("<stdout>") + 1); if (g.outf == NULL) bail("not enough memory", ""); strcpy(g.outf, "<stdout>"); g.outd = 1; if (!g.decode && !g.force && isatty(g.outd)) bail("trying to write compressed data to a terminal", " (use -f to force)"); } else { char *to = g.inf, *sufx = ""; size_t pre = 0; /* select parts of the output file name */ if (g.decode) { /* for -dN or -dNT, use the path from the input file and the name from the header, stripping any path in the header name */ if ((g.headis & 1) != 0 && g.hname != NULL) { pre = justname(g.inf) - g.inf; to = justname(g.hname); len = strlen(to); } /* for -d or -dNn, replace abbreviated suffixes */ else if (strcmp(to + len, ".tgz") == 0) sufx = ".tar"; } else /* add appropriate suffix when compressing */ sufx = 
g.sufx; /* create output file and open to write */ g.outf = MALLOC(pre + len + strlen(sufx) + 1); if (g.outf == NULL) bail("not enough memory", ""); memcpy(g.outf, g.inf, pre); memcpy(g.outf + pre, to, len); strcpy(g.outf + pre + len, sufx); g.outd = open(g.outf, O_CREAT | O_TRUNC | O_WRONLY | (g.force ? 0 : O_EXCL), 0600); /* if exists and not -f, give user a chance to overwrite */ if (g.outd < 0 && errno == EEXIST && isatty(0) && g.verbosity) { int ch, reply; fprintf(stderr, "%s exists -- overwrite (y/n)? ", g.outf); fflush(stderr); reply = -1; do { ch = getchar(); if (reply < 0 && ch != ' ' && ch != '\t') reply = ch == 'y' || ch == 'Y' ? 1 : 0; } while (ch != EOF && ch != '\n' && ch != '\r'); if (reply == 1) g.outd = open(g.outf, O_CREAT | O_TRUNC | O_WRONLY, 0600); } /* if exists and no overwrite, report and go on to next */ if (g.outd < 0 && errno == EEXIST) { complain("%s exists -- skipping", g.outf); RELEASE(g.outf); RELEASE(g.hname); if (g.ind != 0) close(g.ind); return; } /* if some other error, give up */ if (g.outd < 0) bail("write error on ", g.outf); } SET_BINARY_MODE(g.outd); RELEASE(g.hname); /* process ind to outd */ if (g.verbosity > 1) fprintf(stderr, "%s to %s ", g.inf, g.outf); if (g.decode) { if (method == 8) infchk(); else if (method == 257) unlzw(); else cat(); } #ifndef NOTHREAD else if (g.procs > 1) parallel_compress(); #endif else single_compress(0); if (g.verbosity > 1) { putc('\n', stderr); fflush(stderr); } /* finish up, copy attributes, set times, delete original */ if (g.ind != 0) close(g.ind); if (g.outd != 1) { if (close(g.outd)) bail("write error on ", g.outf); g.outd = -1; /* now prevent deletion on interrupt */ if (g.ind != 0) { copymeta(g.inf, g.outf); if (!g.keep) unlink(g.inf); } if (g.decode && (g.headis & 2) != 0 && g.stamp) touch(g.outf, g.stamp); } RELEASE(g.outf); }
{ "deleted": [ { "line_no": 224, "char_start": 8033, "char_end": 8058, "line": " char *to, *repl;\n" }, { "line_no": 225, "char_start": 8058, "char_end": 8059, "line": "\n" }, { "line_no": 226, "char_start": 8059, "char_end": 8127, "line": " /* use header name for output when decompressing with -N */\n" }, { "line_no": 227, "char_start": 8127, "char_end": 8147, "line": " to = g.inf;\n" }, { "line_no": 228, "char_start": 8147, "char_end": 8213, "line": " if (g.decode && (g.headis & 1) != 0 && g.hname != NULL) {\n" }, { "line_no": 229, "char_start": 8213, "char_end": 8239, "line": " to = g.hname;\n" }, { "line_no": 230, "char_start": 8239, "char_end": 8274, "line": " len = strlen(g.hname);\n" }, { "line_no": 232, "char_start": 8284, "char_end": 8285, "line": "\n" }, { "line_no": 233, "char_start": 8285, "char_end": 8336, "line": " /* replace .tgz with .tar when decoding */\n" }, { "line_no": 234, "char_start": 8336, "char_end": 8403, "line": " repl = g.decode && strcmp(to + len, \".tgz\") ? \"\" : \".tar\";\n" }, { "line_no": 237, "char_start": 8455, "char_end": 8534, "line": " g.outf = MALLOC(len + (g.decode ? strlen(repl) : strlen(g.sufx)) + 1);\n" }, { "line_no": 240, "char_start": 8605, "char_end": 8638, "line": " memcpy(g.outf, to, len);\n" }, { "line_no": 241, "char_start": 8638, "char_end": 8694, "line": " strcpy(g.outf + len, g.decode ? repl : g.sufx);\n" }, { "line_no": 243, "char_start": 8755, "char_end": 8815, "line": " (g.force ? 
0 : O_EXCL), 0600);\n" } ], "added": [ { "line_no": 224, "char_start": 8033, "char_end": 8071, "line": " char *to = g.inf, *sufx = \"\";\n" }, { "line_no": 225, "char_start": 8071, "char_end": 8095, "line": " size_t pre = 0;\n" }, { "line_no": 226, "char_start": 8095, "char_end": 8096, "line": "\n" }, { "line_no": 227, "char_start": 8096, "char_end": 8147, "line": " /* select parts of the output file name */\n" }, { "line_no": 228, "char_start": 8147, "char_end": 8171, "line": " if (g.decode) {\n" }, { "line_no": 229, "char_start": 8171, "char_end": 8249, "line": " /* for -dN or -dNT, use the path from the input file and the name\n" }, { "line_no": 230, "char_start": 8249, "char_end": 8322, "line": " from the header, stripping any path in the header name */\n" }, { "line_no": 231, "char_start": 8322, "char_end": 8380, "line": " if ((g.headis & 1) != 0 && g.hname != NULL) {\n" }, { "line_no": 232, "char_start": 8380, "char_end": 8427, "line": " pre = justname(g.inf) - g.inf;\n" }, { "line_no": 233, "char_start": 8427, "char_end": 8467, "line": " to = justname(g.hname);\n" }, { "line_no": 234, "char_start": 8467, "char_end": 8501, "line": " len = strlen(to);\n" }, { "line_no": 235, "char_start": 8501, "char_end": 8515, "line": " }\n" }, { "line_no": 236, "char_start": 8515, "char_end": 8578, "line": " /* for -d or -dNn, replace abbreviated suffixes */\n" }, { "line_no": 237, "char_start": 8578, "char_end": 8630, "line": " else if (strcmp(to + len, \".tgz\") == 0)\n" }, { "line_no": 238, "char_start": 8630, "char_end": 8661, "line": " sufx = \".tar\";\n" }, { "line_no": 240, "char_start": 8671, "char_end": 8684, "line": " else\n" }, { "line_no": 241, "char_start": 8684, "char_end": 8742, "line": " /* add appropriate suffix when compressing */\n" }, { "line_no": 242, "char_start": 8742, "char_end": 8769, "line": " sufx = g.sufx;\n" }, { "line_no": 245, "char_start": 8821, "char_end": 8876, "line": " g.outf = MALLOC(pre + len + strlen(sufx) + 1);\n" }, { "line_no": 248, 
"char_start": 8947, "char_end": 8983, "line": " memcpy(g.outf, g.inf, pre);\n" }, { "line_no": 249, "char_start": 8983, "char_end": 9022, "line": " memcpy(g.outf + pre, to, len);\n" }, { "line_no": 250, "char_start": 9022, "char_end": 9064, "line": " strcpy(g.outf + pre + len, sufx);\n" }, { "line_no": 252, "char_start": 9125, "char_end": 9186, "line": " (g.force ? 0 : O_EXCL), 0600);\n" } ] }
{ "deleted": [ { "char_start": 8052, "char_end": 8053, "chars": "r" }, { "char_start": 8055, "char_end": 8056, "chars": "l" }, { "char_start": 8070, "char_end": 8071, "chars": "u" }, { "char_start": 8076, "char_end": 8078, "chars": "ad" }, { "char_start": 8079, "char_end": 8080, "chars": "r" }, { "char_start": 8094, "char_end": 8095, "chars": "u" }, { "char_start": 8096, "char_end": 8098, "chars": " w" }, { "char_start": 8099, "char_end": 8101, "chars": "en" }, { "char_start": 8102, "char_end": 8105, "chars": "dec" }, { "char_start": 8107, "char_end": 8109, "chars": "pr" }, { "char_start": 8110, "char_end": 8112, "chars": "ss" }, { "char_start": 8114, "char_end": 8115, "chars": "g" }, { "char_start": 8116, "char_end": 8117, "chars": "w" }, { "char_start": 8120, "char_end": 8123, "chars": " -N" }, { "char_start": 8124, "char_end": 8126, "chars": "*/" }, { "char_start": 8135, "char_end": 8136, "chars": "t" }, { "char_start": 8138, "char_end": 8139, "chars": "=" }, { "char_start": 8141, "char_end": 8142, "chars": "." }, { "char_start": 8144, "char_end": 8146, "chars": "f;" }, { "char_start": 8159, "char_end": 8171, "chars": "g.decode && " }, { "char_start": 8264, "char_end": 8271, "chars": "g.hname" }, { "char_start": 8283, "char_end": 8284, "chars": "\n" }, { "char_start": 8304, "char_end": 8316, "chars": ".tgz with .t" }, { "char_start": 8318, "char_end": 8321, "chars": " wh" }, { "char_start": 8322, "char_end": 8325, "chars": "n d" }, { "char_start": 8326, "char_end": 8328, "chars": "co" }, { "char_start": 8330, "char_end": 8332, "chars": "ng" }, { "char_start": 8344, "char_end": 8348, "chars": "repl" }, { "char_start": 8349, "char_end": 8350, "chars": "=" }, { "char_start": 8351, "char_end": 8354, "chars": "g.d" }, { "char_start": 8355, "char_end": 8358, "chars": "cod" }, { "char_start": 8360, "char_end": 8362, "chars": "&&" }, { "char_start": 8388, "char_end": 8389, "chars": "?" 
}, { "char_start": 8390, "char_end": 8392, "chars": "\"\"" }, { "char_start": 8393, "char_end": 8394, "chars": ":" }, { "char_start": 8479, "char_end": 8480, "chars": "l" }, { "char_start": 8481, "char_end": 8482, "chars": "n" }, { "char_start": 8485, "char_end": 8500, "chars": "(g.decode ? str" }, { "char_start": 8503, "char_end": 8509, "chars": "(repl)" }, { "char_start": 8510, "char_end": 8511, "chars": ":" }, { "char_start": 8519, "char_end": 8521, "chars": "g." }, { "char_start": 8525, "char_end": 8526, "chars": ")" }, { "char_start": 8628, "char_end": 8630, "chars": "to" }, { "char_start": 8632, "char_end": 8633, "chars": "l" }, { "char_start": 8634, "char_end": 8635, "chars": "n" }, { "char_start": 8646, "char_end": 8649, "chars": "str" }, { "char_start": 8665, "char_end": 8666, "chars": "," }, { "char_start": 8669, "char_end": 8672, "chars": "dec" }, { "char_start": 8673, "char_end": 8675, "chars": "de" }, { "char_start": 8676, "char_end": 8677, "chars": "?" }, { "char_start": 8680, "char_end": 8682, "chars": "pl" }, { "char_start": 8683, "char_end": 8684, "chars": ":" }, { "char_start": 8685, "char_end": 8687, "chars": "g." 
} ], "added": [ { "char_start": 8049, "char_end": 8057, "chars": " = g.inf" }, { "char_start": 8060, "char_end": 8087, "chars": "sufx = \"\";\n size_t p" }, { "char_start": 8089, "char_end": 8093, "chars": " = 0" }, { "char_start": 8109, "char_end": 8113, "chars": "lect" }, { "char_start": 8114, "char_end": 8124, "chars": "parts of t" }, { "char_start": 8126, "char_end": 8137, "chars": " output fil" }, { "char_start": 8144, "char_end": 8186, "chars": "*/\n if (g.decode) {\n /* " }, { "char_start": 8190, "char_end": 8194, "chars": "-dN " }, { "char_start": 8195, "char_end": 8203, "chars": "r -dNT, " }, { "char_start": 8204, "char_end": 8207, "chars": "se " }, { "char_start": 8208, "char_end": 8211, "chars": "he " }, { "char_start": 8212, "char_end": 8213, "chars": "a" }, { "char_start": 8216, "char_end": 8218, "chars": "fr" }, { "char_start": 8220, "char_end": 8223, "chars": " th" }, { "char_start": 8224, "char_end": 8225, "chars": " " }, { "char_start": 8227, "char_end": 8230, "chars": "put" }, { "char_start": 8231, "char_end": 8232, "chars": "f" }, { "char_start": 8233, "char_end": 8240, "chars": "le and " }, { "char_start": 8242, "char_end": 8250, "chars": "e name\n " }, { "char_start": 8259, "char_end": 8269, "chars": " from " }, { "char_start": 8270, "char_end": 8272, "chars": "he" }, { "char_start": 8273, "char_end": 8280, "chars": "header," }, { "char_start": 8281, "char_end": 8289, "chars": "strippin" }, { "char_start": 8290, "char_end": 8300, "chars": " any path " }, { "char_start": 8302, "char_end": 8321, "chars": " the header name */" }, { "char_start": 8322, "char_end": 8326, "chars": " " }, { "char_start": 8392, "char_end": 8443, "chars": " pre = justname(g.inf) - g.inf;\n " }, { "char_start": 8448, "char_end": 8457, "chars": "justname(" }, { "char_start": 8464, "char_end": 8465, "chars": ")" }, { "char_start": 8467, "char_end": 8471, "chars": " " }, { "char_start": 8496, "char_end": 8498, "chars": "to" }, { "char_start": 8509, "char_end": 8513, 
"chars": " " }, { "char_start": 8515, "char_end": 8519, "chars": " " }, { "char_start": 8530, "char_end": 8546, "chars": "for -d or -dNn, " }, { "char_start": 8555, "char_end": 8557, "chars": "bb" }, { "char_start": 8559, "char_end": 8563, "chars": "viat" }, { "char_start": 8565, "char_end": 8570, "chars": " suff" }, { "char_start": 8571, "char_end": 8574, "chars": "xes" }, { "char_start": 8586, "char_end": 8588, "chars": " " }, { "char_start": 8591, "char_end": 8593, "chars": "ls" }, { "char_start": 8595, "char_end": 8597, "chars": "if" }, { "char_start": 8598, "char_end": 8599, "chars": "(" }, { "char_start": 8624, "char_end": 8645, "chars": "== 0)\n " }, { "char_start": 8646, "char_end": 8650, "chars": "sufx" }, { "char_start": 8651, "char_end": 8652, "chars": "=" }, { "char_start": 8661, "char_end": 8769, "chars": " }\n else\n /* add appropriate suffix when compressing */\n sufx = g.sufx;\n" }, { "char_start": 8845, "char_end": 8847, "chars": "pr" }, { "char_start": 8855, "char_end": 8856, "chars": "+" }, { "char_start": 8970, "char_end": 8975, "chars": "g.inf" }, { "char_start": 8977, "char_end": 8979, "chars": "pr" }, { "char_start": 8991, "char_end": 8994, "chars": "mem" }, { "char_start": 9006, "char_end": 9015, "chars": " pre, to," }, { "char_start": 9019, "char_end": 9029, "chars": ");\n " }, { "char_start": 9030, "char_end": 9037, "chars": "strcpy(" }, { "char_start": 9040, "char_end": 9043, "chars": "utf" }, { "char_start": 9044, "char_end": 9045, "chars": "+" }, { "char_start": 9046, "char_end": 9047, "chars": "p" }, { "char_start": 9049, "char_end": 9052, "chars": " + " }, { "char_start": 9053, "char_end": 9056, "chars": "en," }, { "char_start": 9125, "char_end": 9126, "chars": " " } ] }
github.com/madler/pigz/commit/fdad1406b3ec809f4954ff7cdf9e99eb18c2458f
pigz.c
cwe-022
imap_hcache_open
header_cache_t *imap_hcache_open(struct ImapData *idata, const char *path) { struct ImapMbox mx; struct Url url; char cachepath[PATH_MAX]; char mbox[PATH_MAX]; if (path) imap_cachepath(idata, path, mbox, sizeof(mbox)); else { if (!idata->ctx || imap_parse_path(idata->ctx->path, &mx) < 0) return NULL; imap_cachepath(idata, mx.mbox, mbox, sizeof(mbox)); FREE(&mx.mbox); } mutt_account_tourl(&idata->conn->account, &url); url.path = mbox; url_tostring(&url, cachepath, sizeof(cachepath), U_PATH); return mutt_hcache_open(HeaderCache, cachepath, imap_hcache_namer); }
header_cache_t *imap_hcache_open(struct ImapData *idata, const char *path) { struct ImapMbox mx; struct Url url; char cachepath[PATH_MAX]; char mbox[PATH_MAX]; if (path) imap_cachepath(idata, path, mbox, sizeof(mbox)); else { if (!idata->ctx || imap_parse_path(idata->ctx->path, &mx) < 0) return NULL; imap_cachepath(idata, mx.mbox, mbox, sizeof(mbox)); FREE(&mx.mbox); } if (strstr(mbox, "/../") || (strcmp(mbox, "..") == 0) || (strncmp(mbox, "../", 3) == 0)) return NULL; size_t len = strlen(mbox); if ((len > 3) && (strcmp(mbox + len - 3, "/..") == 0)) return NULL; mutt_account_tourl(&idata->conn->account, &url); url.path = mbox; url_tostring(&url, cachepath, sizeof(cachepath), U_PATH); return mutt_hcache_open(HeaderCache, cachepath, imap_hcache_namer); }
{ "deleted": [], "added": [ { "line_no": 19, "char_start": 413, "char_end": 504, "line": " if (strstr(mbox, \"/../\") || (strcmp(mbox, \"..\") == 0) || (strncmp(mbox, \"../\", 3) == 0))\n" }, { "line_no": 20, "char_start": 504, "char_end": 521, "line": " return NULL;\n" }, { "line_no": 21, "char_start": 521, "char_end": 550, "line": " size_t len = strlen(mbox);\n" }, { "line_no": 22, "char_start": 550, "char_end": 607, "line": " if ((len > 3) && (strcmp(mbox + len - 3, \"/..\") == 0))\n" }, { "line_no": 23, "char_start": 607, "char_end": 624, "line": " return NULL;\n" }, { "line_no": 24, "char_start": 624, "char_end": 625, "line": "\n" } ] }
{ "deleted": [], "added": [ { "char_start": 415, "char_end": 627, "chars": "if (strstr(mbox, \"/../\") || (strcmp(mbox, \"..\") == 0) || (strncmp(mbox, \"../\", 3) == 0))\n return NULL;\n size_t len = strlen(mbox);\n if ((len > 3) && (strcmp(mbox + len - 3, \"/..\") == 0))\n return NULL;\n\n " } ] }
github.com/neomutt/neomutt/commit/57971dba06346b2d7179294f4528b8d4427a7c5d
imap/util.c
cwe-022
_normalize
def _normalize(self, metaerrors): """Normalize output format to be usable by Anaconda's linting frontend """ errors = [] for error in metaerrors: if self.filepath not in error.get('path', ''): continue error_type = error.get('severity', 'X').capitalize()[0] if error_type == 'X': continue if error_type not in ['E', 'W']: error_type = 'V' errors.append({ 'underline_range': True, 'lineno': error.get('line', 0), 'offset': error.get('col', 0), 'raw_message': error.get('message', ''), 'code': 0, 'level': error_type, 'message': '[{0}] {1} ({2}): {3}'.format( error_type, error.get('linter', 'none'), error.get('severity', 'none'), error.get('message') ) }) return errors
def _normalize(self, metaerrors): """Normalize output format to be usable by Anaconda's linting frontend """ errors = [] for error in metaerrors: last_path = os.path.join( os.path.basename(os.path.dirname(self.filepath)), os.path.basename(self.filepath) ) if last_path not in error.get('path', ''): continue error_type = error.get('severity', 'X').capitalize()[0] if error_type == 'X': continue if error_type not in ['E', 'W']: error_type = 'V' errors.append({ 'underline_range': True, 'lineno': error.get('line', 0), 'offset': error.get('col', 0), 'raw_message': error.get('message', ''), 'code': 0, 'level': error_type, 'message': '[{0}] {1} ({2}): {3}'.format( error_type, error.get('linter', 'none'), error.get('severity', 'none'), error.get('message') ) }) return errors
{ "deleted": [ { "line_no": 7, "char_start": 183, "char_end": 242, "line": " if self.filepath not in error.get('path', ''):\n" } ], "added": [ { "line_no": 7, "char_start": 183, "char_end": 221, "line": " last_path = os.path.join(\n" }, { "line_no": 8, "char_start": 221, "char_end": 287, "line": " os.path.basename(os.path.dirname(self.filepath)),\n" }, { "line_no": 9, "char_start": 287, "char_end": 335, "line": " os.path.basename(self.filepath)\n" }, { "line_no": 10, "char_start": 335, "char_end": 349, "line": " )\n" }, { "line_no": 11, "char_start": 349, "char_end": 404, "line": " if last_path not in error.get('path', ''):\n" } ] }
{ "deleted": [], "added": [ { "char_start": 195, "char_end": 217, "chars": "last_path = os.path.jo" }, { "char_start": 218, "char_end": 273, "chars": "n(\n os.path.basename(os.path.dirname(sel" }, { "char_start": 274, "char_end": 287, "chars": ".filepath)),\n" }, { "char_start": 288, "char_end": 320, "chars": " os.path.basename(" }, { "char_start": 329, "char_end": 369, "chars": "path)\n )\n if last_" } ] }
github.com/DamnWidget/anaconda_go/commit/d3db90bb8853d832927818699591b91f56f6413c
plugin/handlers_go/anagonda/context/gometalinter.py
cwe-022
dd_load_text_ext
/* Load the contents of dump-directory item <name> as text.
 *
 * dd    - opened dump directory; its dd_dirname is the base path
 * name  - item (file) name inside the dump directory
 * flags - passed through to load_text_file()
 *
 * Returns a malloc'd string the caller must free (NULL behavior depends
 * on flags, via load_text_file()).
 *
 * NOTE(review): <name> is joined onto dd->dd_dirname without any
 * validation, so a value containing "../" can escape the dump directory
 * (path traversal, CWE-22) — callers must only pass trusted item names,
 * or a filename check should be added here.
 */
char* dd_load_text_ext(const struct dump_dir *dd, const char *name, unsigned flags)
{
//    if (!dd->locked)
//        error_msg_and_die("dump_dir is not opened"); /* bug */

    /* Compat with old abrt dumps. Remove in abrt-2.1 */
    if (strcmp(name, "release") == 0)
        name = FILENAME_OS_RELEASE;

    char *full_path = concat_path_file(dd->dd_dirname, name);
    char *ret = load_text_file(full_path, flags);
    free(full_path);

    return ret;
}
/* Load the contents of dump-directory item <name> as text.
 *
 * dd    - opened dump directory; its dd_dirname is the base path
 * name  - item (file) name inside the dump directory
 * flags - passed through to load_text_file()
 *
 * <name> is validated first: path-like names (e.g. containing "../") are
 * rejected so a caller-supplied item name cannot escape dd->dd_dirname.
 * On an invalid name the function dies, unless flags contains
 * DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE, in which case it proceeds and the
 * subsequent load presumably fails per load_text_file()'s flag handling.
 *
 * Returns a malloc'd string the caller must free (NULL behavior depends
 * on flags, via load_text_file()).
 */
char* dd_load_text_ext(const struct dump_dir *dd, const char *name, unsigned flags)
{
//    if (!dd->locked)
//        error_msg_and_die("dump_dir is not opened"); /* bug */

    /* Refuse path-like item names (directory-traversal hardening) */
    if (!str_is_correct_filename(name))
    {
        error_msg("Cannot load text. '%s' is not a valid file name", name);
        if (!(flags & DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE))
            xfunc_die();
    }

    /* Compat with old abrt dumps. Remove in abrt-2.1 */
    if (strcmp(name, "release") == 0)
        name = FILENAME_OS_RELEASE;

    char *full_path = concat_path_file(dd->dd_dirname, name);
    char *ret = load_text_file(full_path, flags);
    free(full_path);

    return ret;
}
{ "deleted": [], "added": [ { "line_no": 6, "char_start": 175, "char_end": 215, "line": " if (!str_is_correct_filename(name))\n" }, { "line_no": 7, "char_start": 215, "char_end": 221, "line": " {\n" }, { "line_no": 8, "char_start": 221, "char_end": 297, "line": " error_msg(\"Cannot load text. '%s' is not a valid file name\", name);\n" }, { "line_no": 9, "char_start": 297, "char_end": 357, "line": " if (!(flags & DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE))\n" }, { "line_no": 10, "char_start": 357, "char_end": 382, "line": " xfunc_die();\n" }, { "line_no": 11, "char_start": 382, "char_end": 388, "line": " }\n" }, { "line_no": 12, "char_start": 388, "char_end": 389, "line": "\n" } ] }
{ "deleted": [], "added": [ { "char_start": 179, "char_end": 393, "chars": "if (!str_is_correct_filename(name))\n {\n error_msg(\"Cannot load text. '%s' is not a valid file name\", name);\n if (!(flags & DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE))\n xfunc_die();\n }\n\n " } ] }
github.com/abrt/libreport/commit/239c4f7d1f47265526b39ad70106767d00805277
src/lib/dump_dir.c
cwe-022
_download_file
@staticmethod def _download_file(bucket, filename, local_dir): key = bucket.get_key(filename) local_filename = os.path.join(local_dir, filename) key.get_contents_to_filename(local_filename) return local_filename
@staticmethod def _download_file(bucket, filename, local_dir): key = bucket.get_key(filename) local_filename = os.path.join(local_dir, os.path.basename(filename)) key.get_contents_to_filename(local_filename) return local_filename
{ "deleted": [ { "line_no": 4, "char_start": 110, "char_end": 169, "line": " local_filename = os.path.join(local_dir, filename)\n" } ], "added": [ { "line_no": 4, "char_start": 110, "char_end": 187, "line": " local_filename = os.path.join(local_dir, os.path.basename(filename))\n" } ] }
{ "deleted": [], "added": [ { "char_start": 159, "char_end": 176, "chars": "os.path.basename(" }, { "char_start": 184, "char_end": 185, "chars": ")" } ] }
github.com/openstack/nova/commit/76363226bd8533256f7795bba358d7f4b8a6c9e6
nova/image/s3.py
cwe-022

Dataset Card for "sven"

This dataset mirror is unofficial and is not affiliated with the paper's authors.

Paper: https://arxiv.org/abs/2302.05319
Repository: https://github.com/eth-sri/sven
Downloads last month
1
Edit dataset card