diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md deleted file mode 100644 index 2191affe..00000000 --- a/.github/pull_request_template.md +++ /dev/null @@ -1,7 +0,0 @@ -Please specify the following in the description: - -- Meaningful title. Use the tempalte: ```[Milestone]```.```PullRequest_Title``` -- A reference to the original GitHub issue using # selector. -- A description/notes about this pull request. -- If you fix a bug don't forget about test case. -- If you add a new feature don't forget about test case. diff --git a/test/validation/BaseTextEditor.java b/test/validation/BaseTextEditor.java new file mode 100644 index 00000000..9100d6a9 --- /dev/null +++ b/test/validation/BaseTextEditor.java @@ -0,0 +1,66 @@ +package org.jkiss.dbeaver.ui.editors.text; +import org.eclipse.core.resources.IFile; +import org.eclipse.core.resources.ResourcesPlugin; +import org.eclipse.core.runtime.CoreException; +import org.eclipse.jface.action.GroupMarker; +import org.eclipse.jface.action.IAction; +import org.eclipse.jface.action.IMenuManager; +import org.eclipse.jface.action.Separator; +import org.eclipse.jface.text.IDocument; +import org.eclipse.jface.text.IUndoManager; +import org.eclipse.jface.text.TextViewer; +import org.eclipse.jface.text.source.SourceViewer; +import org.eclipse.swt.SWT; +import org.eclipse.swt.custom.ST; +import org.eclipse.swt.custom.StyledText; +import org.eclipse.swt.widgets.Composite; +import org.eclipse.ui.IEditorInput; +import org.eclipse.ui.IEditorPart; +import org.eclipse.ui.IWorkbenchActionConstants; +import org.eclipse.ui.texteditor.AbstractDecoratedTextEditor; +import org.eclipse.ui.texteditor.IDocumentProvider; +import org.eclipse.ui.texteditor.ITextEditorActionConstants; +import org.jkiss.code.Nullable; +import org.jkiss.dbeaver.runtime.DBWorkbench; +import org.jkiss.dbeaver.ui.ICommentsSupport; +import org.jkiss.dbeaver.ui.ISingleControlEditor; +import org.jkiss.dbeaver.ui.UIUtils; +import org.jkiss.dbeaver.ui.dialogs.DialogUtils; +import org.jkiss.dbeaver.ui.editors.*; +import org.jkiss.dbeaver.utils.ContentUtils; +import org.jkiss.dbeaver.utils.GeneralUtils; +import org.jkiss.utils.IOUtils; +import java.io.*; +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.List; +public abstract class BaseTextEditor extends AbstractDecoratedTextEditor implements ISingleControlEditor { + public static final String TEXT_EDITOR_CONTEXT = "org.eclipse.ui.textEditorScope"; + public static final String GROUP_SQL_PREFERENCES = "sql.preferences"; + public static final String GROUP_SQL_ADDITIONS = "sql.additions"; + public static final String GROUP_SQL_EXTRAS = "sql.extras"; + private List actionContributors = new ArrayList<>(); + public void addContextMenuContributor(IActionContributor contributor) { + actionContributors.add(contributor); + } + public static BaseTextEditor getTextEditor(IEditorPart editor) + { + if (editor == null) { + return null; + } + if (editor instanceof BaseTextEditor) { + return (BaseTextEditor) editor; + } + return editor.getAdapter(BaseTextEditor.class); + } + @Override + protected void doSetInput(IEditorInput input) throws CoreException { + if (input != getEditorInput()) { + IEditorInput editorInput = getEditorInput(); + if (editorInput instanceof IStatefulEditorInput) { + ((IStatefulEditorInput) editorInput).release(); + } + } + super.doSetInput(input); + } +} \ No newline at end of file diff --git a/test/validation/CssPreprocessors.java b/test/validation/CssPreprocessors.java 
new file mode 100644 index 00000000..c768831e --- /dev/null +++ b/test/validation/CssPreprocessors.java @@ -0,0 +1,43 @@ +package org.netbeans.modules.web.common.api; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import org.netbeans.api.annotations.common.CheckForNull; +import org.netbeans.api.annotations.common.NonNull; +import org.netbeans.api.annotations.common.NullAllowed; +import org.netbeans.api.project.Project; +import org.netbeans.modules.web.common.cssprep.CssPreprocessorAccessor; +import org.netbeans.modules.web.common.cssprep.CssPreprocessorsAccessor; +import org.netbeans.modules.web.common.spi.CssPreprocessorImplementation; +import org.netbeans.modules.web.common.spi.CssPreprocessorImplementationListener; +import org.openide.filesystems.FileObject; +import org.openide.util.Lookup; +import org.openide.util.LookupEvent; +import org.openide.util.LookupListener; +import org.openide.util.Parameters; +import org.openide.util.RequestProcessor; +import org.openide.util.lookup.Lookups; +public final class CssPreprocessors { + public static final String PREPROCESSORS_PATH = "CSS/PreProcessors"; + private static final RequestProcessor RP = new RequestProcessor(CssPreprocessors.class.getName(), 2); + private static final Lookup.Result PREPROCESSORS = Lookups.forPath(PREPROCESSORS_PATH).lookupResult(CssPreprocessorImplementation.class); + private static final CssPreprocessors INSTANCE = new CssPreprocessors(); + private final List preprocessors = new CopyOnWriteArrayList<>(); + final CssPreprocessorsListener.Support listenersSupport = new CssPreprocessorsListener.Support(); + private final PreprocessorImplementationsListener preprocessorImplementationsListener = new PreprocessorImplementationsListener(); + + void reinitProcessors() { + synchronized (preprocessors) { + clearProcessors(); + assert preprocessors.isEmpty() : "Empty preprocessors expected but: " + preprocessors; + preprocessors.addAll(map(PREPROCESSORS.allInstances())); + for (CssPreprocessor cssPreprocessor : preprocessors) { + cssPreprocessor.getDelegate().addCssPreprocessorListener(preprocessorImplementationsListener); + } + } + listenersSupport.firePreprocessorsChanged(); + } + +} \ No newline at end of file diff --git a/test/validation/DynaMenuModel.java b/test/validation/DynaMenuModel.java new file mode 100644 index 00000000..39ff1f13 --- /dev/null +++ b/test/validation/DynaMenuModel.java @@ -0,0 +1,111 @@ +package org.openide.awt; +import java.awt.Component; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import javax.swing.Action; +import javax.swing.Icon; +import javax.swing.ImageIcon; +import javax.swing.JComponent; +import javax.swing.JMenu; +import javax.swing.JMenuItem; +import javax.swing.JPopupMenu; +import javax.swing.JSeparator; +import javax.swing.UIManager; +import org.openide.filesystems.FileObject; +import org.openide.util.ImageUtilities; +import org.openide.util.Utilities; +import org.openide.util.actions.Presenter; +class DynaMenuModel { + private static final Icon BLANK_ICON = new ImageIcon(ImageUtilities.loadImage("org/openide/loaders/empty.gif")); + private List menuItems; + private HashMap actionToMenuMap; + private boolean isWithIcons = false; + public DynaMenuModel() { + actionToMenuMap = new HashMap(); + } + public void loadSubmenu(List 
cInstances, JMenu m, boolean remove, Map cookiesToFiles) { + boolean addSeparator = false; + Icon curIcon = null; + Iterator it = cInstances.iterator(); + menuItems = new ArrayList(cInstances.size()); + actionToMenuMap.clear(); + while (it.hasNext()) { + Object obj = it.next(); + if (obj instanceof Action) { + FileObject file = cookiesToFiles.get(obj); + if (file != null) { + AcceleratorBinding.setAccelerator((Action) obj, file); + } + } + if (obj instanceof Presenter.Menu) { + obj = ((Presenter.Menu)obj).getMenuPresenter(); + } + if (obj instanceof DynamicMenuContent) { + if(addSeparator) { + menuItems.add(null); + addSeparator = false; + } + DynamicMenuContent mn = (DynamicMenuContent)obj; + JComponent[] itms = convertArray(mn.getMenuPresenters()); + actionToMenuMap.put(mn, itms); + Iterator itx = Arrays.asList(itms).iterator(); + while (itx.hasNext()) { + JComponent comp = (JComponent)itx.next(); + menuItems.add(comp); + isWithIcons = checkIcon(comp, isWithIcons); + } + continue; + } + if (obj instanceof JMenuItem) { + if(addSeparator) { + menuItems.add(null); + addSeparator = false; + } + isWithIcons = checkIcon(obj, isWithIcons); + menuItems.add((JMenuItem)obj); + } else if (obj instanceof JSeparator) { + addSeparator = menuItems.size() > 0; + } else if (obj instanceof Action) { + if(addSeparator) { + menuItems.add(null); + addSeparator = false; + } + Action a = (Action)obj; + Actions.MenuItem item = new Actions.MenuItem(a, true); + isWithIcons = checkIcon(item, isWithIcons); + actionToMenuMap.put(item, new JComponent[] {item}); + menuItems.add(item); + } + } + if (isWithIcons) { + menuItems = alignVertically(menuItems); + } + if (remove) { + m.removeAll(); + } + JComponent curItem = null; + boolean wasSeparator = false; + for (Iterator iter = menuItems.iterator(); iter.hasNext(); ) { + curItem = iter.next(); + if (curItem == null) { + JMenu menu = new JMenu(); + menu.addSeparator(); + curItem = (JSeparator)menu.getPopupMenu().getComponent(0); + } + m.add(curItem); + boolean isSeparator = curItem instanceof JSeparator; + if (isSeparator && wasSeparator) { + curItem.setVisible(false); + } + if (!(curItem instanceof InvisibleMenuItem)) { + wasSeparator = isSeparator; + } + } + } + +} \ No newline at end of file diff --git a/test/validation/MetadataEncoder.java b/test/validation/MetadataEncoder.java new file mode 100644 index 00000000..612fa758 --- /dev/null +++ b/test/validation/MetadataEncoder.java @@ -0,0 +1,59 @@ +package org.springframework.messaging.rsocket; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.CompositeByteBuf; +import io.rsocket.metadata.CompositeMetadataFlyweight; +import io.rsocket.metadata.TaggingMetadataFlyweight; +import io.rsocket.metadata.WellKnownMimeType; +import reactor.core.publisher.Mono; +import org.springframework.core.ReactiveAdapter; +import org.springframework.core.ResolvableType; +import org.springframework.core.codec.Encoder; +import org.springframework.core.io.buffer.DataBuffer; +import org.springframework.core.io.buffer.DataBufferFactory; +import org.springframework.core.io.buffer.NettyDataBufferFactory; +import org.springframework.lang.Nullable; +import org.springframework.util.Assert; +import org.springframework.util.CollectionUtils; +import org.springframework.util.MimeType; +import org.springframework.util.ObjectUtils; 
+final class MetadataEncoder { + private static final Pattern VARS_PATTERN = Pattern.compile("\\{(.+?)}"); + private static final Object NO_VALUE = new Object(); + private final MimeType metadataMimeType; + private final RSocketStrategies strategies; + private final boolean isComposite; + private final ByteBufAllocator allocator; + @Nullable + private String route; + private final List metadataEntries = new ArrayList<>(4); + private boolean hasAsyncValues; + MetadataEncoder(MimeType metadataMimeType, RSocketStrategies strategies) { + Assert.notNull(metadataMimeType, "'metadataMimeType' is required"); + Assert.notNull(strategies, "RSocketStrategies is required"); + this.metadataMimeType = metadataMimeType; + this.strategies = strategies; + this.isComposite = this.metadataMimeType.toString().equals( + WellKnownMimeType.MESSAGE_RSOCKET_COMPOSITE_METADATA.getString()); + this.allocator = bufferFactory() instanceof NettyDataBufferFactory ? + ((NettyDataBufferFactory) bufferFactory()).getByteBufAllocator() : ByteBufAllocator.DEFAULT; + } + private DataBufferFactory bufferFactory() { + return this.strategies.dataBufferFactory(); + } + public MetadataEncoder route(String route, Object... routeVars) { + this.route = expand(route, routeVars); + if (!this.isComposite) { + int count = this.route != null ? this.metadataEntries.size() + 1 : this.metadataEntries.size(); + Assert.isTrue(count < 2, "Composite metadata required for multiple metadata entries."); + } + return this; + } + +} \ No newline at end of file diff --git a/test/validation/NameNodeRpcServer.java b/test/validation/NameNodeRpcServer.java new file mode 100644 index 00000000..41821188 --- /dev/null +++ b/test/validation/NameNodeRpcServer.java @@ -0,0 +1,239 @@ +package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH; +import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_HANDLER_COUNT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_HANDLER_RATIO_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_HANDLER_RATIO_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_DEPTH; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_LENGTH; +import static org.apache.hadoop.util.Time.now; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.security.PrivilegedExceptionAction; +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import com.google.common.collect.Lists; +import org.apache.hadoop.HadoopIllegalArgumentException; +import 
org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.ReconfigurationTaskStatus; +import org.apache.hadoop.crypto.CryptoProtocolVersion; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; +import org.apache.hadoop.hdfs.AddBlockFlag; +import org.apache.hadoop.fs.CacheFlag; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.InvalidPathException; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.ParentNotDirectoryException; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.fs.QuotaUsage; +import org.apache.hadoop.ha.HAServiceStatus; +import org.apache.hadoop.ha.HealthCheckFailedException; +import org.apache.hadoop.ha.ServiceFailedException; +import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService; +import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB; +import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.DFSUtilClient; +import org.apache.hadoop.hdfs.HDFSPolicyProvider; +import org.apache.hadoop.hdfs.inotify.EventBatch; +import org.apache.hadoop.hdfs.inotify.EventBatchList; +import org.apache.hadoop.hdfs.protocol.AclException; +import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse; +import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; +import org.apache.hadoop.hdfs.protocol.BatchedDirectoryListing; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; +import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats; +import org.apache.hadoop.hdfs.protocol.ECTopologyVerifierResult; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.FSLimitException; +import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; +import org.apache.hadoop.hdfs.protocol.HdfsPartialListing; +import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; +import org.apache.hadoop.hdfs.protocol.OpenFileEntry; +import org.apache.hadoop.hdfs.protocol.OpenFilesIterator; +import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType; +import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException; +import org.apache.hadoop.hdfs.protocol.QuotaExceededException; +import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; +import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats; +import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus; +import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing; +import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; +import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol; +import org.apache.hadoop.hdfs.protocol.proto.DatanodeLifelineProtocolProtos.DatanodeLifelineProtocolService; +import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService; +import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService; +import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ReconfigurationProtocolService; +import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.DatanodeLifelineProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.DatanodeLifelineProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.ReconfigurationProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.ReconfigurationProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; +import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerFaultInjector; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; +import org.apache.hadoop.hdfs.server.common.HttpGetFailedException; +import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; +import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; +import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; +import 
org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager; +import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; +import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; +import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; +import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; +import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; +import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.NodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; +import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; +import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; +import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports; +import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; +import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; +import org.apache.hadoop.hdfs.server.protocol.StorageReport; +import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; +import org.apache.hadoop.io.EnumSetWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.RetriableException; +import org.apache.hadoop.ipc.RetryCache; +import org.apache.hadoop.ipc.RetryCache.CacheEntry; +import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.ipc.StandbyException; +import org.apache.hadoop.ipc.RefreshRegistry; +import org.apache.hadoop.ipc.RefreshResponse; +import org.apache.hadoop.net.Node; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.Groups; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService; +import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService; +import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolPB; +import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolServerSideTranslatorPB; +import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolPB; +import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolServerSideTranslatorPB; +import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolPB; +import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolServerSideTranslatorPB; +import org.apache.hadoop.ipc.proto.RefreshCallQueueProtocolProtos.RefreshCallQueueProtocolService; +import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB; +import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolServerSideTranslatorPB; +import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshProtocolService; +import 
org.apache.hadoop.security.token.SecretManager.InvalidToken; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService; +import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB; +import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB; +import org.apache.hadoop.tracing.SpanReceiverInfo; +import org.apache.hadoop.tracing.TraceAdminPB.TraceAdminService; +import org.apache.hadoop.tracing.TraceAdminProtocolPB; +import org.apache.hadoop.tracing.TraceAdminProtocolServerSideTranslatorPB; +import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.VersionUtil; +import org.slf4j.Logger; +import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.BlockingService; +import javax.annotation.Nonnull; +@InterfaceAudience.Private +@VisibleForTesting +public class NameNodeRpcServer implements NamenodeProtocols { + private static final Logger LOG = NameNode.LOG; + private static final Logger stateChangeLog = NameNode.stateChangeLog; + private static final Logger blockStateChangeLog = NameNode + .blockStateChangeLog; + protected final FSNamesystem namesystem; + protected final NameNode nn; + private final NameNodeMetrics metrics; + private final RetryCache retryCache; + private final boolean serviceAuthEnabled; + private final RPC.Server serviceRpcServer; + private final InetSocketAddress serviceRPCAddress; + private final RPC.Server lifelineRpcServer; + private final InetSocketAddress lifelineRPCAddress; + protected final RPC.Server clientRpcServer; + protected final InetSocketAddress clientRpcAddress; + private final String minimumDataNodeVersion; + private final String defaultECPolicyName; + + public ECBlockGroupStats getECBlockGroupStats() throws IOException { + if (!this.nn.isStarted()) { + String message = NameNode.composeNotStartedMessage(this.nn.getRole()); + throw new RetriableException(message); + } + namesystem.checkOperation(OperationCategory.READ); + return namesystem.getECBlockGroupStats(); + } +} \ No newline at end of file diff --git a/test/validation/User.java b/test/validation/User.java new file mode 100644 index 00000000..47bbe34b --- /dev/null +++ b/test/validation/User.java @@ -0,0 +1,24 @@ +package com.baeldung.constructorsstaticfactorymethods.entities; +import java.time.LocalTime; +import java.util.logging.ConsoleHandler; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.logging.SimpleFormatter; +public class User { + private static volatile User instance = null; + private static final Logger LOGGER = Logger.getLogger(User.class.getName()); + private final String name; + private final String email; + private final String country; + public static User createWithDefaultCountry(String name, String email) { + return new User(name, email, "Argentina"); + } + public static User createWithLoggedInstantiationTime(String name, String email, String country) { + ConsoleHandler handler = new ConsoleHandler(); + handler.setLevel(Level.INFO); + handler.setFormatter(new SimpleFormatter()); + LOGGER.addHandler(handler); + LOGGER.log(Level.INFO, "Creating User instance at : {0}", LocalTime.now()); + return new User(name, email, country); + } +} \ No newline at end of file diff --git a/test/validation/__init__.py b/test/validation/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/validation/test_validation.py b/test/validation/test_validation.py new file mode 100644 index 
00000000..b020f87d --- /dev/null +++ b/test/validation/test_validation.py @@ -0,0 +1,84 @@ +from pathlib import Path +from unittest import TestCase + +from veniq.dataset_collection.validation import fix_start_end_lines_for_opportunity, \ + percent_matched + + +class TestValidation(TestCase): + folder = Path(__file__).absolute().parent + + def test_validation_semi_2_closing_brackets_with_2_lines_before_block(self): + file = self.folder / "DynaMenuModel.java" + # range doesn't include the last item + # also, add it as it would be numbered starting at 1 + lines_extracted_by_semi = list(range(91, 109)) + fixed_lines = fix_start_end_lines_for_opportunity( + lines_extracted_by_semi, + str(file) + ) + self.assertEqual((91, 108), fixed_lines) + + def test_validation_semi_2_closing_brackets_without_lines_before_block(self): + file = self.folder / "BaseTextEditor.java" + # range doesn't include the last item + # also, add it as it would be numbered starting at 1 + lines_extracted_by_semi = list(range(58, 62)) + fixed_lines = fix_start_end_lines_for_opportunity( + lines_extracted_by_semi, + str(file) + ) + self.assertEqual((58, 63), fixed_lines) + + def test_semi_no_need_to_find_closing_brackets(self): + file = self.folder / "User.java" + lines_extracted_by_semi = list(range(17, 22)) + fixed_lines = fix_start_end_lines_for_opportunity( + lines_extracted_by_semi, + str(file) + ) + self.assertEqual((17, 21), fixed_lines) + + def test_validation_semi_closing_brackets_with_2_blocks(self): + file = self.folder / "CssPreprocessors.java" + lines_extracted_by_semi = list(range(32, 38)) + fixed_lines = fix_start_end_lines_for_opportunity( + lines_extracted_by_semi, + str(file) + ) + self.assertEqual((32, 39), fixed_lines) + + def test_validation_semi_1_closing_brackets(self): + file = self.folder / "NameNodeRpcServer.java" + lines_extracted_by_semi = list(range(232, 235)) + fixed_lines = fix_start_end_lines_for_opportunity( + lines_extracted_by_semi, + str(file) + ) + self.assertEqual((232, 235), fixed_lines) + + file = self.folder / "MetadataEncoder.java" + lines_extracted_by_semi = list(range(51, 55)) + fixed_lines = fix_start_end_lines_for_opportunity( + lines_extracted_by_semi, + str(file) + ) + self.assertEqual((51, 55), fixed_lines) + + def test_get_percent_matched(self): + semi_lines = list(range(50, 58)) + dataset_lines = list(range(50, 58)) + percent = percent_matched(dataset_lines, semi_lines) + self.assertEqual(percent, 1.0) + + def test_percent_partially_matched(self): + semi_lines = list(range(65, 81)) + dataset_lines = list(range(69, 82)) + percent = percent_matched(dataset_lines, semi_lines) + self.assertEqual(percent, 12 / 13) + + def test_percent_not_matched(self): + semi_lines = list(range(65, 68)) + dataset_lines = list(range(69, 82)) + percent = percent_matched(dataset_lines, semi_lines) + self.assertEqual(percent, 0) diff --git a/veniq/dataset_collection/validation.py b/veniq/dataset_collection/validation.py new file mode 100644 index 00000000..1c2fc206 --- /dev/null +++ b/veniq/dataset_collection/validation.py @@ -0,0 +1,286 @@ +import os +import traceback +from argparse import ArgumentParser +from dataclasses import dataclass, asdict +from functools import partial +from pathlib import Path +from typing import List, Tuple + +import pandas as pd +from numpy import mean +from pebble import ProcessPool +from tqdm import tqdm + +from veniq.ast_framework import AST, ASTNodeType +from veniq.ast_framework import ASTNode +from veniq.baselines.semi.create_extraction_opportunities import 
create_extraction_opportunities +from veniq.baselines.semi.extract_semantic import extract_method_statements_semantic +from veniq.baselines.semi.filter_extraction_opportunities import filter_extraction_opportunities +from veniq.baselines.semi.rank_extraction_opportunities import rank_extraction_opportunities, ExtractionOpportunityGroup +from veniq.metrics.ncss.ncss import NCSSMetric +from veniq.utils.ast_builder import build_ast +from veniq.utils.encoding_detector import read_text_with_autodetected_encoding + + +def find_extraction_opportunities( + method_ast: AST) -> List[ExtractionOpportunityGroup]: + statements_semantic = extract_method_statements_semantic(method_ast) + extraction_opportunities = create_extraction_opportunities(statements_semantic) + filtered_extraction_opportunities = filter_extraction_opportunities( + extraction_opportunities, statements_semantic, method_ast + ) + extraction_opportunities_groups = rank_extraction_opportunities( + statements_semantic, filtered_extraction_opportunities + ) + + return extraction_opportunities_groups + + +@dataclass +class RowResult: + output_filename: str + input_filename: str + start_line_SEMI: int + end_line_SEMI: int + start_line_dataset: int + end_line_dataset: int + percent_matched: float + class_name: str + method_name: str + error_string: str + ncss: int + matched: bool + failed_cases_in_SEMI_algorithm: bool + no_opportunity_chosen: bool + failed_cases_in_validation_examples: bool + + +def fix_start_end_lines_for_opportunity( + extracted_lines_of_opportunity: List[int], + filepath: str) -> Tuple[int, int]: + """ + Finds start and end lines for opportunity + + :param filepath: filename where opportunity was found + :param extracted_lines_of_opportunity: list of lines for opportunity + :return: list of extracted lines for opportunity + """ + start_line_opportunity = min(extracted_lines_of_opportunity) + end_line_opportunity = max(extracted_lines_of_opportunity) + text = read_text_with_autodetected_encoding(filepath).split('\n') + + extraction = text[start_line_opportunity - 1:end_line_opportunity] + open_brackets = 0 + close_brackets = 0 + for x in extraction: + close_brackets += x.count('}') + open_brackets += x.count('{') + + if open_brackets < close_brackets: + diff = close_brackets - open_brackets + count = 1 + for text_line in text[end_line_opportunity:]: + while diff > 0: + if text_line.find('{') > -1: + diff -= 1 + count += 1 + + start_line_opportunity += count - 1 + + elif open_brackets > close_brackets: + diff = open_brackets - close_brackets + count = 1 + for text_line in text[end_line_opportunity:]: + while diff > 0: + if text_line.find('}') > -1: + diff -= 1 + count += 1 + + end_line_opportunity += count - 1 + + return start_line_opportunity, end_line_opportunity + + +# flake8: noqa: C901 +def validate_row(dataset_dir: Path, row: pd.Series) \ + -> List[RowResult]: + """ + Validate row of dataset + + :param dataset_dir: directory to dataset, path before the relative path in + output_filename + :param row: row of dataframe of synth validation dataset + :return: Stats - return collected stats + """ + results = [] + try: + start_line_of_inserted_block = int(row[1]['inline_insertion_line_start']) + end_line_of_inserted_block = int(row[1]['inline_insertion_line_end']) + + src_filename = row[1]['output_filename'] + class_name = row[1]['class_name'] + full_path = dataset_dir / src_filename + ast = AST.build_from_javalang(build_ast(full_path)) + function_to_analyze = row[1]['method_where_invocation_occurred'] + + for class_decl 
in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION): + if class_decl.name == class_name: + objects_to_consider = list(class_decl.methods) + list(class_decl.constructors) or [] + for ast_node in objects_to_consider: + result = RowResult( + output_filename=full_path, + input_filename=row[1]['input_filename'], + class_name='Not available', + method_name='', + start_line_SEMI=-1, + end_line_SEMI=-1, + start_line_dataset=start_line_of_inserted_block, + end_line_dataset=end_line_of_inserted_block, + percent_matched=-1.0, + error_string='', + ncss=0, + matched=False, + failed_cases_in_SEMI_algorithm=False, + no_opportunity_chosen=False, + failed_cases_in_validation_examples=False, + ) + if ast_node.name != function_to_analyze: + continue + try: + ast_subtree = ast.get_subtree(ast_node) + print(src_filename, 'start') + opport = find_extraction_opportunities(ast_subtree) + print(src_filename, 'end') + if opport: + print(src_filename, 'start find_matched_lines') + find_matched_lines( + ast_node, + ast_subtree, + class_decl, + start_line_of_inserted_block, + end_line_of_inserted_block, + full_path, + opport, + result) + + print(src_filename, 'end find_matched_lines') + else: + result.no_opportunity_chosen = True + + except Exception as e: + traceback.print_exc() + result.error_string = str(e) + result.failed_cases_in_SEMI_algorithm = True + finally: + results.append(result) + + break + break + + except Exception as e: + traceback.print_exc() + result.error_string = str(e) + result.failed_cases_in_validation_examples = True + results.append(result) + + return results + + +def find_matched_lines( + ast_node: ASTNode, + ast_subtree: AST, + class_decl: ASTNode, + start_line_of_inserted_block: int, + end_line_of_inserted_block: int, + full_path: str, + opportunities_list: List[ExtractionOpportunityGroup], + result: RowResult) -> None: + + if Path(full_path).stem == 'ParametersPickerOperator_cf13c04617679fdf0fe1779623e8a28e41e89e045c640a1f507d166ba1e8370f_verify_111': + print() + best_group = opportunities_list[0] + lines = [node.line for node in best_group._optimal_opportunity] + fixed_lines = fix_start_end_lines_for_opportunity( + lines, + full_path + ) + start_line_opportunity = min(fixed_lines) + end_line_opportunity = max(fixed_lines) + dataset_range_extraction = range( + start_line_of_inserted_block, + end_line_of_inserted_block + 1 + ) + result.class_name = class_decl.name + result.method_name = ast_node.name + result.start_line_SEMI = start_line_opportunity + result.end_line_SEMI = end_line_opportunity + result.ncss = NCSSMetric().value(ast_subtree) + if (start_line_of_inserted_block == start_line_opportunity) \ + and (end_line_of_inserted_block == end_line_opportunity): + result.matched = True + result.percent_matched = percent_matched(dataset_range_extraction, fixed_lines) + + +def percent_matched(dataset_range_lines, semi_range_lines): + lines_intersected = set(dataset_range_lines) & set(semi_range_lines) + return float(len(lines_intersected)) / len(set(dataset_range_lines)) + + +if __name__ == '__main__': + parser = ArgumentParser() + parser.add_argument( + "-d", "--dataset_dir", + help="Path for file with output results", + required=True + ) + parser.add_argument( + "-i", "--csv_input", + help="Path for csv with synth dataset" + ) + system_cores_qty = os.cpu_count() or 1 + parser.add_argument( + "--jobs", + "-j", + type=int, + default=system_cores_qty - 1, + help="Number of processes to spawn. " + "By default one less than number of cores. 
" + "Be careful to raise it above, machine may stop responding while creating dataset.", + ) + args = parser.parse_args() + dataset_dir = Path(args.dataset_dir) + csv_dataset_filename = Path(args.csv_input) + df = pd.read_csv(csv_dataset_filename) + df = df[df['can_be_parsed']] + + output_df = pd.DataFrame(columns=list(RowResult.__annotations__.keys())) + + with ProcessPool(1) as executor: + validate_row_f = partial(validate_row, dataset_dir) + future = executor.map(validate_row_f, df.iterrows(), timeout=10000, ) + result = future.result() + for index, row in tqdm(df.iterrows(), total=df.shape[0]): + try: + results: List[RowResult] = next(result) + for res in results: + output_df = output_df.append(asdict(res), ignore_index=True) + output_df.to_csv('matched.csv') + except Exception: + print(traceback.format_exc()) + continue + + matched_cases = float(output_df[output_df["matched"]].shape[0]) + failed_cases_in_SEMI_algorithm = output_df[output_df["failed_cases_in_SEMI_algorithm"]].shape[0] + failed_cases_in_validation_examples = output_df[output_df["failed_cases_in_validation_examples"]].shape[0] + no_opportunity_chosen = output_df[output_df["no_opportunity_chosen"]].shape[0] + matched_percent = mean(output_df[output_df["percent_matched"] > 0].percent_matched.values) + print(f'Failed SEMI algorithm errors: {failed_cases_in_SEMI_algorithm}') + print(f'Failed examples of synth dataset: {failed_cases_in_validation_examples}') + print(f'matched_cases: {matched_cases}') + print(f'No opportunity chosen: {no_opportunity_chosen} times') + print(f'Total number of handled cases: {output_df.shape[0]}') + print(f'Average of matched lines: {matched_percent}') + total_case_handled = output_df.shape[0] - failed_cases_in_SEMI_algorithm - failed_cases_in_validation_examples + if total_case_handled > 0: + result = matched_cases / total_case_handled + print(f'Matched {result}% of cases, {matched_cases} out of {total_case_handled}')